Mirror of https://github.com/zeek/zeek.git (synced 2025-10-02 06:38:20 +00:00)
Migrate table-based for-loops to key-value iteration
parent 41c7b229d3
commit 01d303b480
36 changed files with 150 additions and 153 deletions
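
For context, the pattern applied throughout the diff below is Zeek's key-value form of the table for-loop: instead of looping over keys and indexing the table again (or copying the value into a local), the loop binds the key and the value together. A minimal sketch of old versus new style, using a hypothetical table t that is not part of this commit:

    event bro_init()
        {
        local t: table[count] of string = { [1] = "one", [2] = "two" };

        # Old style being migrated away from: key-only loop plus a lookup.
        for ( i in t )
            print fmt("%d -> %s", i, t[i]);

        # New style used throughout this commit: bind key and value at once.
        for ( i, v in t )
            print fmt("%d -> %s", i, v);
        }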
@@ -289,9 +289,9 @@ event file_state_remove(f: fa_file)
     {
     # In case any events never had matching packets, flush
     # the extras to the log.
-    for ( i in f$u2_events )
+    for ( i, ev in f$u2_events )
         {
-        Log::write(LOG, create_info(f$u2_events[i]));
+        Log::write(LOG, create_info(ev));
         }
     }
 }
@@ -340,10 +340,8 @@ event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) &priority=

 event Broker::peer_lost(endpoint: Broker::EndpointInfo, msg: string) &priority=10
     {
-    for ( node_name in nodes )
+    for ( node_name, n in nodes )
         {
-        local n = nodes[node_name];
-
         if ( n?$id && n$id == endpoint$id )
             {
             Cluster::log(fmt("node down: %s", node_name));
@@ -246,10 +246,8 @@ event Cluster::node_down(name: string, id: string) &priority=10

 function site_id_in_pool(pool: Pool, site_id: count): bool
     {
-    for ( i in pool$nodes )
+    for ( i, pn in pool$nodes )
         {
-        local pn = pool$nodes[i];
-
         if ( pn$site_id == site_id )
             return T;
         }

@@ -395,10 +393,8 @@ event bro_init() &priority=-5
         pet$excluded += pool$spec$max_nodes;
         }

-    for ( nt in pool_eligibility )
+    for ( nt, pet in pool_eligibility )
         {
-        pet = pool_eligibility[nt];
-
         if ( pet$excluded > |pet$eligible_nodes| )
             Reporter::fatal(fmt("not enough %s nodes to satisfy pool exclusivity requirements: need %d nodes", nt, pet$excluded));
         }
@@ -159,9 +159,9 @@ event bro_init() &priority=10
     # Iterate over all existing options and add ourselves as change handlers
     # with a low priority so that we can log the changes.
     local gids = global_ids();
-    for ( i in gids )
+    for ( i, gid in gids )
         {
-        if ( ! gids[i]$option_value )
+        if ( ! gid$option_value )
             next;

         Option::set_change_handler(i, config_option_changed, -100);
@@ -53,8 +53,8 @@ hook extend_match(info: Info, s: Seen, items: set[Item]) &priority=6

     if ( s$f?$conns && |s$f$conns| == 1 )
         {
-        for ( cid in s$f$conns )
-            s$conn = s$f$conns[cid];
+        for ( cid, c in s$f$conns )
+            s$conn = c;
         }

     if ( ! info?$file_mime_type && s$f?$info && s$f$info?$mime_type )
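A related idiom visible in the extend_match hunk above (and in the create_file_info hunk further down): when a table is known to hold exactly one entry, a key-value loop is also the way to pull out that single value without knowing its key. A minimal sketch with a hypothetical single-entry table t:

    if ( |t| == 1 )
        for ( k, v in t )
            print v;   # v is the lone value; the loop body runs once.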
@@ -235,8 +235,8 @@ function expire_host_data(data: table[addr] of MetaDataTable, idx: addr): interv
     {
     local meta_tbl: MetaDataTable = data[idx];
     local metas: set[MetaData];
-    for ( src in meta_tbl )
-        add metas[meta_tbl[src]];
+    for ( src, md in meta_tbl )
+        add metas[md];

     return expire_item(cat(idx), ADDR, metas);
     }

@@ -245,8 +245,8 @@ function expire_subnet_data(data: table[subnet] of MetaDataTable, idx: subnet):
     {
     local meta_tbl: MetaDataTable = data[idx];
     local metas: set[MetaData];
-    for ( src in meta_tbl )
-        add metas[meta_tbl[src]];
+    for ( src, md in meta_tbl )
+        add metas[md];

     return expire_item(cat(idx), SUBNET, metas);
     }

@@ -259,8 +259,8 @@ function expire_string_data(data: table[string, Type] of MetaDataTable, idx: any

     local meta_tbl: MetaDataTable = data[indicator, indicator_type];
     local metas: set[MetaData];
-    for ( src in meta_tbl )
-        add metas[meta_tbl[src]];
+    for ( src, md in meta_tbl )
+        add metas[md];

     return expire_item(indicator, indicator_type, metas);
     }
@@ -306,20 +306,19 @@ function get_items(s: Seen): set[Item]
         if ( s$host in data_store$host_data )
             {
             mt = data_store$host_data[s$host];
-            for ( m in mt )
+            for ( m, md in mt )
                 {
-                add return_data[Item($indicator=cat(s$host), $indicator_type=ADDR, $meta=mt[m])];
+                add return_data[Item($indicator=cat(s$host), $indicator_type=ADDR, $meta=md)];
                 }
             }
         # See if the host is part of a known subnet, which has meta values
         local nets: table[subnet] of MetaDataTable;
         nets = filter_subnet_table(addr_to_subnet(s$host), data_store$subnet_data);
-        for ( n in nets )
+        for ( n, mt in nets )
             {
-            mt = nets[n];
-            for ( m in mt )
+            for ( m, md in mt )
                 {
-                add return_data[Item($indicator=cat(n), $indicator_type=SUBNET, $meta=mt[m])];
+                add return_data[Item($indicator=cat(n), $indicator_type=SUBNET, $meta=md)];
                 }
             }
         }

@@ -330,9 +329,9 @@ function get_items(s: Seen): set[Item]
         if ( [lower_indicator, s$indicator_type] in data_store$string_data )
             {
             mt = data_store$string_data[lower_indicator, s$indicator_type];
-            for ( m in mt )
+            for ( m, md in mt )
                 {
-                add return_data[Item($indicator=s$indicator, $indicator_type=s$indicator_type, $meta=mt[m])];
+                add return_data[Item($indicator=s$indicator, $indicator_type=s$indicator_type, $meta=md)];
                 }
             }
         }
@@ -569,10 +569,10 @@ function create_file_info(f: fa_file): Notice::FileInfo
         fi$mime = f$info$mime_type;

     if ( f?$conns && |f$conns| == 1 )
-        for ( id in f$conns )
+        for ( id, c in f$conns )
             {
             fi$cid = id;
-            fi$cuid = f$conns[id]$uid;
+            fi$cuid = c$uid;
             }

     return fi;
@@ -162,16 +162,16 @@ event bro_init() &priority=5
     Log::create_stream(PacketFilter::LOG, [$columns=Info, $path="packet_filter"]);

     # Preverify the capture and restrict filters to give more granular failure messages.
-    for ( id in capture_filters )
+    for ( id, cf in capture_filters )
         {
-        if ( ! test_filter(capture_filters[id]) )
-            Reporter::fatal(fmt("Invalid capture_filter named '%s' - '%s'", id, capture_filters[id]));
+        if ( ! test_filter(cf) )
+            Reporter::fatal(fmt("Invalid capture_filter named '%s' - '%s'", id, cf));
         }

-    for ( id in restrict_filters )
+    for ( id, rf in restrict_filters )
         {
-        if ( ! test_filter(restrict_filters[id]) )
-            Reporter::fatal(fmt("Invalid restrict filter named '%s' - '%s'", id, restrict_filters[id]));
+        if ( ! test_filter(rf) )
+            Reporter::fatal(fmt("Invalid restrict filter named '%s' - '%s'", id, rf));
         }
     }

@@ -234,20 +234,20 @@ function build(): string
     if ( |capture_filters| == 0 && ! enable_auto_protocol_capture_filters )
         cfilter = default_capture_filter;

-    for ( id in capture_filters )
-        cfilter = combine_filters(cfilter, "or", capture_filters[id]);
+    for ( id, cf in capture_filters )
+        cfilter = combine_filters(cfilter, "or", cf);

     if ( enable_auto_protocol_capture_filters )
         cfilter = combine_filters(cfilter, "or", Analyzer::get_bpf());

     # Apply the restriction filters.
     local rfilter = "";
-    for ( id in restrict_filters )
-        rfilter = combine_filters(rfilter, "and", restrict_filters[id]);
+    for ( id, rf in restrict_filters )
+        rfilter = combine_filters(rfilter, "and", rf);

     # Apply the dynamic restriction filters.
-    for ( filt in dynamic_restrict_filters )
-        rfilter = combine_filters(rfilter, "and", string_cat("not (", dynamic_restrict_filters[filt], ")"));
+    for ( filt, drf in dynamic_restrict_filters )
+        rfilter = combine_filters(rfilter, "and", string_cat("not (", drf, ")"));

     # Finally, join them into one filter.
     local filter = combine_filters(cfilter, "and", rfilter);
@@ -300,17 +300,17 @@ function compose_results(r1: Result, r2: Result): Result
     {
     local result: Result = table();

-    for ( id in r1 )
+    for ( id, rv in r1 )
         {
-        result[id] = r1[id];
+        result[id] = rv;
         }

-    for ( id in r2 )
+    for ( id, rv in r2 )
         {
         if ( id in r1 )
-            result[id] = compose_resultvals(r1[id], r2[id]);
+            result[id] = compose_resultvals(r1[id], rv);
         else
-            result[id] = r2[id];
+            result[id] = rv;
         }

     return result;
@@ -8,9 +8,9 @@ event SumStats::process_epoch_result(ss: SumStat, now: time, data: ResultTable)
     local i = 50;
     local keys_to_delete: vector of SumStats::Key = vector();

-    for ( key in data )
+    for ( key, res in data )
         {
-        ss$epoch_result(now, key, data[key]);
+        ss$epoch_result(now, key, res);
         keys_to_delete += key;

         if ( --i == 0 )

@@ -37,8 +37,8 @@ event SumStats::finish_epoch(ss: SumStat)
     local now = network_time();
     if ( bro_is_terminating() )
         {
-        for ( key in data )
-            ss$epoch_result(now, key, data[key]);
+        for ( key, val in data )
+            ss$epoch_result(now, key, val);

         if ( ss?$epoch_finished )
             ss$epoch_finished(now);
@@ -215,9 +215,8 @@ event connection_state_remove(c: connection)
         return;

     # TODO: Go through any remaining dce_rpc requests that haven't been processed with replies.
-    for ( i in c$dce_rpc_backing )
+    for ( i, x in c$dce_rpc_backing )
         {
-        local x = c$dce_rpc_backing[i];
         set_state(c, x);

         # In the event that the binding wasn't seen, but the pipe
@@ -184,9 +184,9 @@ function log_unmatched_msgs_queue(q: Queue::Queue)

 function log_unmatched_msgs(msgs: PendingMessages)
     {
-    for ( trans_id in msgs )
+    for ( trans_id, q in msgs )
         {
-        log_unmatched_msgs_queue(msgs[trans_id]);
+        log_unmatched_msgs_queue(q);
         }

     clear_table(msgs);

@@ -285,8 +285,8 @@ hook set_session(c: connection, msg: dns_msg, is_query: bool) &priority=5
     else
         {
         # Just pick an arbitrary, unpaired query.
-        for ( trans_id in c$dns_state$pending_queries )
-            if ( Queue::len(c$dns_state$pending_queries[trans_id]) > 0 )
+        for ( trans_id, q in c$dns_state$pending_queries )
+            if ( Queue::len(q) > 0 )
                 {
                 c$dns_state$pending_query = pop_msg(c$dns_state$pending_queries, trans_id);
                 break;
@@ -37,10 +37,10 @@ function describe_file(f: fa_file): string
     if ( f$source != "FTP" )
         return "";

-    for ( cid in f$conns )
+    for ( cid, c in f$conns )
         {
-        if ( f$conns[cid]?$ftp )
-            return FTP::describe(f$conns[cid]$ftp);
+        if ( c?$ftp )
+            return FTP::describe(c$ftp);
         }
     return "";
     }

@@ -295,9 +295,9 @@ event connection_state_remove(c: connection) &priority=-5
     {
     if ( ! c?$ftp ) return;

-    for ( ca in c$ftp$pending_commands )
+    for ( ca, cmdarg in c$ftp$pending_commands )
         {
-        c$ftp$cmdarg = c$ftp$pending_commands[ca];
+        c$ftp$cmdarg = cmdarg;
         ftp_message(c$ftp);
         }
     }
@@ -91,9 +91,8 @@ function get_pending_cmd(pc: PendingCmds, reply_code: count, reply_msg: string):
     local best_seq = 0;
     local best_score: int = -1;

-    for ( cmd_seq in pc )
+    for ( cmd_seq, cmd in pc )
         {
-        local cmd = pc[cmd_seq];
         local score: int = 0;

         # if the command is compatible with the reply code
@@ -40,10 +40,10 @@ function describe_file(f: fa_file): string
     if ( f$source != "HTTP" )
         return "";

-    for ( cid in f$conns )
+    for ( cid, c in f$conns )
         {
-        if ( f$conns[cid]?$http )
-            return build_url_http(f$conns[cid]$http);
+        if ( c?$http )
+            return build_url_http(c$http);
         }
     return "";
     }

@@ -326,11 +326,11 @@ event connection_state_remove(c: connection) &priority=-5
     # Flush all pending but incomplete request/response pairs.
     if ( c?$http_state )
         {
-        for ( r in c$http_state$pending )
+        for ( r, info in c$http_state$pending )
             {
             # We don't use pending elements at index 0.
             if ( r == 0 ) next;
-            Log::write(HTTP::LOG, c$http_state$pending[r]);
+            Log::write(HTTP::LOG, info);
             }
         }
     }
@@ -65,10 +65,8 @@ function log_dcc(f: fa_file)
     {
     if ( ! f?$conns ) return;

-    for ( cid in f$conns )
+    for ( cid, c in f$conns )
         {
-        local c: connection = f$conns[cid];
-
         if ( [cid$resp_h, cid$resp_p] !in dcc_expected_transfers ) next;

         local irc = dcc_expected_transfers[cid$resp_h, cid$resp_p];
@@ -48,11 +48,10 @@ function describe_file(f: fa_file): string
     # are already populated).
     #
     # Just return a bit of our connection information and hope that that is good enough.
-    for ( cid in f$conns )
+    for ( cid, c in f$conns )
         {
-        if ( f$conns[cid]?$krb )
+        if ( c?$krb )
             {
-            local c = f$conns[cid];
             return cat(c$id$resp_h, ":", c$id$resp_p);
             }
         }
@@ -149,13 +149,13 @@ function flush_pending(c: connection)
     # Flush all pending but incomplete request/response pairs.
     if ( c?$sip_state )
         {
-        for ( r in c$sip_state$pending )
+        for ( r, info in c$sip_state$pending )
             {
             # We don't use pending elements at index 0.
             if ( r == 0 )
                 next;

-            Log::write(SIP::LOG, c$sip_state$pending[r]);
+            Log::write(SIP::LOG, info);
             }
         }
     }

@@ -293,9 +293,9 @@ event connection_state_remove(c: connection) &priority=-5
     {
     if ( c?$sip_state )
         {
-        for ( r in c$sip_state$pending )
+        for ( r, info in c$sip_state$pending )
             {
-            Log::write(SIP::LOG, c$sip_state$pending[r]);
+            Log::write(SIP::LOG, info);
             }
         }
     }
@@ -38,11 +38,10 @@ function describe_file(f: fa_file): string
     if ( f$source != "SMB" )
         return "";

-    for ( cid in f$conns )
+    for ( cid, c in f$conns )
         {
-        local info = f$conns[cid];
-        if ( info?$smb_state && info$smb_state?$current_file && info$smb_state$current_file?$name )
-            return info$smb_state$current_file$name;
+        if ( c?$smb_state && c$smb_state?$current_file && c$smb_state$current_file?$name )
+            return c$smb_state$current_file$name;
         }
     return "";
     }

@@ -238,9 +238,8 @@ event file_state_remove(f: fa_file) &priority=-5
     if ( f$source != "SMB" )
         return;

-    for ( id in f$conns )
+    for ( id, c in f$conns )
         {
-        local c = f$conns[id];
         if ( c?$smb_state && c$smb_state?$current_file)
             {
             write_file_log(c$smb_state);
@@ -31,9 +31,8 @@ function describe_file(f: fa_file): string
     if ( f$source != "SMTP" )
         return "";

-    for ( cid in f$conns )
+    for ( cid, c in f$conns )
         {
-        local c = f$conns[cid];
         return SMTP::describe(c$smtp);
         }
     return "";
@@ -66,11 +66,10 @@ function describe_file(f: fa_file): string
     # are already populated).
     #
     # Just return a bit of our connection information and hope that that is good enough.
-    for ( cid in f$conns )
+    for ( cid, c in f$conns )
         {
-        if ( f$conns[cid]?$ssl )
+        if ( c?$ssl )
             {
-            local c = f$conns[cid];
             return cat(c$id$resp_h, ":", c$id$resp_p);
             }
         }

@@ -103,12 +102,12 @@ event file_sniff(f: fa_file, meta: fa_metadata) &priority=5
          || f$info$mime_type == "application/pkix-cert" ) )
         return;

-    for ( cid in f$conns )
-        {
-        if ( ! f$conns[cid]?$ssl )
-            return;
+    local c: connection;

-        local c = f$conns[cid];
+    for ( cid, c in f$conns )
+        {
+        if ( ! c?$ssl )
+            return;
         }

     if ( ! c$ssl?$cert_chain )
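One variation in the file_sniff hunk above: the loop's value variable is declared before the loop (local c: connection;), so the binding made by the final iteration is still available after the loop ends. A minimal sketch of that shape, using a hypothetical table t of strings that is not part of this commit:

    local last = "";
    for ( k, last in t )
        {
        if ( last == "" )
            break;
        }
    # "last" still holds the value bound by the final iteration here.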
@@ -57,9 +57,8 @@ function to_json(v: any, only_loggable: bool &default=F, field_escape_pattern: p
     local rec_parts: string_vec = vector();

     local ft = record_fields(v);
-    for ( field in ft )
+    for ( field, field_desc in ft )
         {
-        local field_desc = ft[field];
         # replace the escape pattern in the field.
         if( field_escape_pattern in field )
             field = cat(sub(field, field_escape_pattern, ""));

@@ -87,11 +86,11 @@ function to_json(v: any, only_loggable: bool &default=F, field_escape_pattern: p
     {
     local tab_parts: vector of string = vector();
     local ta: table[bool] of any = v;
-    for ( ti in ta )
+    for ( ti, tv in ta )
         {
         local ts = to_json(ti);
         local if_quotes = (ts[0] == "\"") ? "" : "\"";
-        tab_parts += cat(if_quotes, ts, if_quotes, ": ", to_json(ta[ti], only_loggable));
+        tab_parts += cat(if_quotes, ts, if_quotes, ": ", to_json(tv, only_loggable));
         }
     return cat("{", join_string_vec(tab_parts, ", "), "}");
     }