Mirror of https://github.com/zeek/zeek.git (synced 2025-10-11 11:08:20 +00:00)
Migrate table-based for-loops to key-value iteration

commit 01d303b480 (parent 41c7b229d3)
36 changed files with 150 additions and 153 deletions
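
The change is mechanical throughout: rather than iterating over a table's keys and indexing back into the table for every value, each loop now binds the key and the value together, saving one lookup per iteration and dropping the temporary local. A minimal sketch of the before/after pattern, with illustrative names that are not taken from the patch:

	event bro_init()
		{
		local ages: table[string] of count = table(["alice"] = 30, ["bob"] = 25);

		# Old style: iterate over the keys, then look each value up again.
		for ( name in ages )
			{
			local age = ages[name];	# one extra table lookup per iteration
			print fmt("%s is %d", name, age);
			}

		# New style: key-value iteration binds both at once.
		for ( name, age in ages )
			print fmt("%s is %d", name, age);
		}
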
@@ -340,10 +340,8 @@ event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) &priority=
 
 event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) &priority=10
 	{
-	for ( node_name in nodes )
+	for ( node_name, n in nodes )
 		{
-		local n = nodes[node_name];
-
 		if ( n?$id && n$id == endpoint$id )
 			{
 			Cluster::log(fmt("node down: %s", node_name));
@@ -246,10 +246,8 @@ event Cluster::node_down(name: string, id: string) &priority=10
 
 function site_id_in_pool(pool: Pool, site_id: count): bool
 	{
-	for ( i in pool$nodes )
+	for ( i, pn in pool$nodes )
 		{
-		local pn = pool$nodes[i];
-
 		if ( pn$site_id == site_id )
 			return T;
 		}
@@ -395,10 +393,8 @@ event bro_init() &priority=-5
 		pet$excluded += pool$spec$max_nodes;
 		}
 
-	for ( nt in pool_eligibility )
+	for ( nt, pet in pool_eligibility )
 		{
-		pet = pool_eligibility[nt];
-
 		if ( pet$excluded > |pet$eligible_nodes| )
 			Reporter::fatal(fmt("not enough %s nodes to satisfy pool exclusivity requirements: need %d nodes", nt, pet$excluded));
 		}
@@ -159,9 +159,9 @@ event bro_init() &priority=10
 	# Iterate over all existing options and add ourselves as change handlers
 	# with a low priority so that we can log the changes.
 	local gids = global_ids();
-	for ( i in gids )
+	for ( i, gid in gids )
 		{
-		if ( ! gids[i]$option_value )
+		if ( ! gid$option_value )
 			next;
 
 		Option::set_change_handler(i, config_option_changed, -100);
@@ -53,8 +53,8 @@ hook extend_match(info: Info, s: Seen, items: set[Item]) &priority=6
 
 	if ( s$f?$conns && |s$f$conns| == 1 )
 		{
-		for ( cid in s$f$conns )
-			s$conn = s$f$conns[cid];
+		for ( cid, c in s$f$conns )
+			s$conn = c;
 		}
 
 	if ( ! info?$file_mime_type && s$f?$info && s$f$info?$mime_type )
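
A note on the hunk above: the loop body runs at most once, since the guard ensures s$f$conns holds exactly one entry. Iterating is the usual Zeek-script idiom for extracting the sole element of a table, which has no direct "give me the one entry" accessor; the key-value form just makes the extraction shorter. A hedged sketch of the idiom, with illustrative names:

	local tbl: table[string] of count = table(["only"] = 42);
	local the_value: count;
	if ( |tbl| == 1 )
		for ( k, v in tbl )
			the_value = v;	# body executes exactly once
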
@@ -235,8 +235,8 @@ function expire_host_data(data: table[addr] of MetaDataTable, idx: addr): interv
 	{
 	local meta_tbl: MetaDataTable = data[idx];
 	local metas: set[MetaData];
-	for ( src in meta_tbl )
-		add metas[meta_tbl[src]];
+	for ( src, md in meta_tbl )
+		add metas[md];
 
 	return expire_item(cat(idx), ADDR, metas);
 	}
@@ -245,8 +245,8 @@ function expire_subnet_data(data: table[subnet] of MetaDataTable, idx: subnet):
 	{
 	local meta_tbl: MetaDataTable = data[idx];
 	local metas: set[MetaData];
-	for ( src in meta_tbl )
-		add metas[meta_tbl[src]];
+	for ( src, md in meta_tbl )
+		add metas[md];
 
 	return expire_item(cat(idx), SUBNET, metas);
 	}
@@ -259,8 +259,8 @@ function expire_string_data(data: table[string, Type] of MetaDataTable, idx: any
 
 	local meta_tbl: MetaDataTable = data[indicator, indicator_type];
 	local metas: set[MetaData];
-	for ( src in meta_tbl )
-		add metas[meta_tbl[src]];
+	for ( src, md in meta_tbl )
+		add metas[md];
 
 	return expire_item(indicator, indicator_type, metas);
 	}
@@ -306,20 +306,19 @@ function get_items(s: Seen): set[Item]
 		if ( s$host in data_store$host_data )
 			{
 			mt = data_store$host_data[s$host];
-			for ( m in mt )
+			for ( m, md in mt )
 				{
-				add return_data[Item($indicator=cat(s$host), $indicator_type=ADDR, $meta=mt[m])];
+				add return_data[Item($indicator=cat(s$host), $indicator_type=ADDR, $meta=md)];
 				}
 			}
 		# See if the host is part of a known subnet, which has meta values
 		local nets: table[subnet] of MetaDataTable;
 		nets = filter_subnet_table(addr_to_subnet(s$host), data_store$subnet_data);
-		for ( n in nets )
+		for ( n, mt in nets )
 			{
-			mt = nets[n];
-			for ( m in mt )
+			for ( m, md in mt )
 				{
-				add return_data[Item($indicator=cat(n), $indicator_type=SUBNET, $meta=mt[m])];
+				add return_data[Item($indicator=cat(n), $indicator_type=SUBNET, $meta=md)];
 				}
 			}
 		}
@@ -330,9 +329,9 @@ function get_items(s: Seen): set[Item]
 	if ( [lower_indicator, s$indicator_type] in data_store$string_data )
 		{
 		mt = data_store$string_data[lower_indicator, s$indicator_type];
-		for ( m in mt )
+		for ( m, md in mt )
 			{
-			add return_data[Item($indicator=s$indicator, $indicator_type=s$indicator_type, $meta=mt[m])];
+			add return_data[Item($indicator=s$indicator, $indicator_type=s$indicator_type, $meta=md)];
 			}
 		}
 	}
@@ -569,10 +569,10 @@ function create_file_info(f: fa_file): Notice::FileInfo
 		fi$mime = f$info$mime_type;
 
 	if ( f?$conns && |f$conns| == 1 )
-		for ( id in f$conns )
+		for ( id, c in f$conns )
 			{
 			fi$cid = id;
-			fi$cuid = f$conns[id]$uid;
+			fi$cuid = c$uid;
 			}
 
 	return fi;
@@ -162,16 +162,16 @@ event bro_init() &priority=5
 	Log::create_stream(PacketFilter::LOG, [$columns=Info, $path="packet_filter"]);
 
 	# Preverify the capture and restrict filters to give more granular failure messages.
-	for ( id in capture_filters )
+	for ( id, cf in capture_filters )
 		{
-		if ( ! test_filter(capture_filters[id]) )
-			Reporter::fatal(fmt("Invalid capture_filter named '%s' - '%s'", id, capture_filters[id]));
+		if ( ! test_filter(cf) )
+			Reporter::fatal(fmt("Invalid capture_filter named '%s' - '%s'", id, cf));
 		}
 
-	for ( id in restrict_filters )
+	for ( id, rf in restrict_filters )
 		{
-		if ( ! test_filter(restrict_filters[id]) )
-			Reporter::fatal(fmt("Invalid restrict filter named '%s' - '%s'", id, restrict_filters[id]));
+		if ( ! test_filter(rf) )
+			Reporter::fatal(fmt("Invalid restrict filter named '%s' - '%s'", id, rf));
 		}
 	}
 
@@ -234,20 +234,20 @@ function build(): string
 	if ( |capture_filters| == 0 && ! enable_auto_protocol_capture_filters )
 		cfilter = default_capture_filter;
 
-	for ( id in capture_filters )
-		cfilter = combine_filters(cfilter, "or", capture_filters[id]);
+	for ( id, cf in capture_filters )
+		cfilter = combine_filters(cfilter, "or", cf);
 
 	if ( enable_auto_protocol_capture_filters )
 		cfilter = combine_filters(cfilter, "or", Analyzer::get_bpf());
 
 	# Apply the restriction filters.
 	local rfilter = "";
-	for ( id in restrict_filters )
-		rfilter = combine_filters(rfilter, "and", restrict_filters[id]);
+	for ( id, rf in restrict_filters )
+		rfilter = combine_filters(rfilter, "and", rf);
 
 	# Apply the dynamic restriction filters.
-	for ( filt in dynamic_restrict_filters )
-		rfilter = combine_filters(rfilter, "and", string_cat("not (", dynamic_restrict_filters[filt], ")"));
+	for ( filt, drf in dynamic_restrict_filters )
+		rfilter = combine_filters(rfilter, "and", string_cat("not (", drf, ")"));
 
 	# Finally, join them into one filter.
 	local filter = combine_filters(cfilter, "and", rfilter);
@@ -300,17 +300,17 @@ function compose_results(r1: Result, r2: Result): Result
 	{
 	local result: Result = table();
 
-	for ( id in r1 )
+	for ( id, rv in r1 )
 		{
-		result[id] = r1[id];
+		result[id] = rv;
 		}
 
-	for ( id in r2 )
+	for ( id, rv in r2 )
 		{
 		if ( id in r1 )
-			result[id] = compose_resultvals(r1[id], r2[id]);
+			result[id] = compose_resultvals(r1[id], rv);
 		else
-			result[id] = r2[id];
+			result[id] = rv;
 		}
 
 	return result;
@@ -8,9 +8,9 @@ event SumStats::process_epoch_result(ss: SumStat, now: time, data: ResultTable)
 	local i = 50;
 	local keys_to_delete: vector of SumStats::Key = vector();
 
-	for ( key in data )
+	for ( key, res in data )
 		{
-		ss$epoch_result(now, key, data[key]);
+		ss$epoch_result(now, key, res);
 		keys_to_delete += key;
 
 		if ( --i == 0 )
@@ -37,8 +37,8 @@ event SumStats::finish_epoch(ss: SumStat)
 	local now = network_time();
 	if ( bro_is_terminating() )
 		{
-		for ( key in data )
-			ss$epoch_result(now, key, data[key]);
+		for ( key, val in data )
+			ss$epoch_result(now, key, val);
 
 		if ( ss?$epoch_finished )
 			ss$epoch_finished(now);