Revert "Merge remote-tracking branch 'origin/topic/vern/at-if-analyze'"

This reverts commit 4e797ddbbc, reversing
changes made to 3ac28ba5a2.
Tim Wojtulewicz 2023-05-31 09:20:33 +02:00
parent cfbb7eb8ee
commit 5a3abbe364
78 changed files with 340 additions and 1286 deletions
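The pattern reverted here repeats throughout the diff: the topic branch had appended an &analyze attribute to @if directives and had turned several @if blocks into run-time if checks, and this commit restores the plain parse-time conditionals. A minimal before/after sketch, taken from the first hunk below; the comment on what &analyze did is an inference from the reverted branch's name and usage, not something stated in this commit:

    # Reverted form from topic/vern/at-if-analyze: &analyze presumably kept the body of an
    # inactive @if branch visible to script analysis even though it was never executed.
    @if ( Cluster::is_enabled() && Cluster::local_node_type() != Cluster::MANAGER ) &analyze
    redef Broker::table_store_master = F;
    @endif

    # Restored form: a plain parse-time conditional; the body is dropped entirely when the
    # condition is false.
    @if ( Cluster::is_enabled() && Cluster::local_node_type() != Cluster::MANAGER )
    redef Broker::table_store_master = F;
    @endif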

@@ -18,11 +18,11 @@ export {
# If we are not the manager, disable automatically generating masters. We will attach
# clones instead.
@if ( Cluster::is_enabled() && Cluster::local_node_type() != Cluster::MANAGER ) &analyze
@if ( Cluster::is_enabled() && Cluster::local_node_type() != Cluster::MANAGER )
redef Broker::table_store_master = F;
@endif
@if ( Broker::table_store_master ) &analyze
@if ( Broker::table_store_master )
global broker_backed_ids: set[string];

@@ -347,7 +347,7 @@ function nodeid_topic(id: string): string
return nodeid_topic_prefix + id + "/";
}
@if ( Cluster::is_enabled() ) &analyze
@if ( Cluster::is_enabled() )
event Cluster::hello(name: string, id: string) &priority=10
{
@@ -383,6 +383,9 @@ event Cluster::hello(name: string, id: string) &priority=10
event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) &priority=10
{
if ( ! Cluster::is_enabled() )
return;
local e = Broker::make_event(Cluster::hello, node, Broker::node_id());
Broker::publish(nodeid_topic(endpoint$id), e);
}

@@ -64,7 +64,7 @@ function archiver_rotation_format_func(ri: Log::RotationFmtInfo): Log::RotationPath
return rval;
}
@if ( Supervisor::is_supervised() ) &analyze
@if ( Supervisor::is_supervised() )
redef Log::default_rotation_dir = "log-queue";

@@ -19,7 +19,7 @@ redef Log::enable_remote_logging = T;
## Log rotation interval.
redef Log::default_rotation_interval = 24 hrs;
@if ( ! Supervisor::is_supervised() ) &analyze
@if ( ! Supervisor::is_supervised() )
## Use the cluster's delete-log script.
redef Log::default_rotation_postprocessor_cmd = "delete-log";
@endif

@@ -13,7 +13,7 @@ redef Log::enable_remote_logging = T;
redef Log::default_rotation_interval = 24hrs;
@if ( ! Supervisor::is_supervised() ) &analyze
@if ( ! Supervisor::is_supervised() )
## Use the cluster's delete-log script.
redef Log::default_rotation_postprocessor_cmd = "delete-log";
@endif

@@ -12,7 +12,7 @@ redef Log::enable_remote_logging = T;
redef Log::default_rotation_interval = 24hrs;
@if ( ! Supervisor::is_supervised() ) &analyze
@if ( ! Supervisor::is_supervised() )
## Use the cluster's delete-log script.
redef Log::default_rotation_postprocessor_cmd = "delete-log";
@endif

@@ -47,6 +47,7 @@ export {
global set_value: function(ID: string, val: any, location: string &default = ""): bool;
}
@if ( Cluster::is_enabled() )
type OptionCacheValue: record {
val: any;
location: string;
@@ -56,21 +57,18 @@ global option_cache: table[string] of OptionCacheValue;
global Config::cluster_set_option: event(ID: string, val: any, location: string);
@if ( Cluster::is_enabled() ) &analyze
function broadcast_option(ID: string, val: any, location: string)
function broadcast_option(ID: string, val: any, location: string) &is_used
{
for ( topic in Cluster::broadcast_topics )
Broker::publish(topic, Config::cluster_set_option, ID, val, location);
}
event Config::cluster_set_option(ID: string, val: any, location: string) &is_used
event Config::cluster_set_option(ID: string, val: any, location: string)
{
if ( Cluster::local_node_type() == Cluster::MANAGER )
{
option_cache[ID] = OptionCacheValue($val=val, $location=location);
broadcast_option(ID, val, location);
}
@if ( Cluster::local_node_type() == Cluster::MANAGER )
option_cache[ID] = OptionCacheValue($val=val, $location=location);
broadcast_option(ID, val, location);
@endif
Option::set(ID, val, location);
}
@@ -87,14 +85,13 @@ function set_value(ID: string, val: any, location: string &default = ""): bool
if ( ! Option::set(ID, val, location) )
return F;
if ( Cluster::local_node_type() == Cluster::MANAGER )
{
option_cache[ID] = OptionCacheValue($val=val, $location=location);
broadcast_option(ID, val, location);
}
else
Broker::publish(Cluster::manager_topic, Config::cluster_set_option,
ID, val, location);
@if ( Cluster::local_node_type() == Cluster::MANAGER )
option_cache[ID] = OptionCacheValue($val=val, $location=location);
broadcast_option(ID, val, location);
@else
Broker::publish(Cluster::manager_topic, Config::cluster_set_option,
ID, val, location);
@endif
return T;
}
@@ -105,7 +102,7 @@ function set_value(ID: string, val: any, location: string &default = ""): bool
}
@endif # Cluster::is_enabled
@if ( Cluster::is_enabled() && Cluster::local_node_type() == Cluster::MANAGER ) &analyze
@if ( Cluster::is_enabled() && Cluster::local_node_type() == Cluster::MANAGER )
# Handling of new worker nodes.
event Cluster::node_up(name: string, id: string) &priority=-10
{
@@ -159,9 +156,10 @@ event zeek_init() &priority=10
Log::create_stream(LOG, [$columns=Info, $ev=log_config, $path="config", $policy=log_policy]);
# Limit logging to the manager - everyone else just feeds off it.
if ( !Cluster::is_enabled() || Cluster::local_node_type() == Cluster::MANAGER )
# Iterate over all existing options and add ourselves as change handlers
# with a low priority so that we can log the changes.
for ( opt in global_options() )
Option::set_change_handler(opt, config_option_changed, -100);
@if ( !Cluster::is_enabled() || Cluster::local_node_type() == Cluster::MANAGER )
# Iterate over all existing options and add ourselves as change handlers
# with a low priority so that we can log the changes.
for ( opt in global_options() )
Option::set_change_handler(opt, config_option_changed, -100);
@endif
}

@@ -17,11 +17,11 @@ global insert_indicator: event(item: Item) &is_used;
const send_store_on_node_up = T &redef;
# If this process is not a manager process, we don't want the full metadata.
@if ( Cluster::local_node_type() != Cluster::MANAGER ) &analyze
@if ( Cluster::local_node_type() != Cluster::MANAGER )
redef have_full_data = F;
@endif
@if ( Cluster::local_node_type() == Cluster::MANAGER ) &analyze
@if ( Cluster::local_node_type() == Cluster::MANAGER )
event zeek_init()
{
Broker::auto_publish(Cluster::worker_topic, remove_indicator);
@@ -73,7 +73,7 @@ event Intel::match_remote(s: Seen) &priority=5
}
@endif
@if ( Cluster::local_node_type() == Cluster::WORKER ) &analyze
@if ( Cluster::local_node_type() == Cluster::WORKER )
event zeek_init()
{
Broker::auto_publish(Cluster::manager_topic, match_remote);
@@ -94,7 +94,7 @@ event Intel::insert_indicator(item: Intel::Item) &priority=5
}
@endif
@if ( Cluster::local_node_type() == Cluster::PROXY ) &analyze
@if ( Cluster::local_node_type() == Cluster::PROXY )
event Intel::insert_indicator(item: Intel::Item) &priority=5
{
# Just forwarding from manager to workers.

@@ -16,7 +16,7 @@ export {
global cluster_netcontrol_delete_rule: event(id: string, reason: string);
}
@if ( Cluster::local_node_type() == Cluster::MANAGER ) &analyze
@if ( Cluster::local_node_type() == Cluster::MANAGER )
event zeek_init()
{
Broker::auto_publish(Cluster::worker_topic, NetControl::rule_added);
@@ -93,7 +93,7 @@ function remove_rule(id: string, reason: string &default="") : bool
}
}
@if ( Cluster::local_node_type() == Cluster::MANAGER ) &analyze
@if ( Cluster::local_node_type() == Cluster::MANAGER )
event NetControl::cluster_netcontrol_delete_rule(id: string, reason: string)
{
delete_rule_impl(id, reason);
@@ -147,7 +147,7 @@ event rule_error(r: Rule, p: PluginState, msg: string) &priority=-5
@endif
# Workers use the events to keep track in their local state tables
@if ( Cluster::local_node_type() != Cluster::MANAGER ) &analyze
@if ( Cluster::local_node_type() != Cluster::MANAGER )
event rule_new(r: Rule) &priority=5
{

@@ -153,17 +153,16 @@ function pretty_print_alarm(out: file, n: Info)
{
local pdescr = "";
if ( Cluster::is_enabled() )
{
pdescr = "local";
@if ( Cluster::is_enabled() )
pdescr = "local";
if ( n?$peer_descr )
pdescr = n$peer_descr;
else if ( n?$peer_name )
pdescr = n$peer_name;
if ( n?$peer_descr )
pdescr = n$peer_descr;
else if ( n?$peer_name )
pdescr = n$peer_name;
pdescr = fmt("<%s> ", pdescr);
}
pdescr = fmt("<%s> ", pdescr);
@endif
local msg = fmt( "%s%s", pdescr, n$msg);

@@ -539,9 +539,9 @@ hook Notice::notice(n: Notice::Info) &priority=-5
{
event Notice::begin_suppression(n$ts, n$suppress_for, n$note, n$identifier);
suppressing[n$note, n$identifier] = n$ts + n$suppress_for;
if ( Cluster::is_enabled() && Cluster::local_node_type() != Cluster::MANAGER )
event Notice::manager_begin_suppression(n$ts, n$suppress_for, n$note, n$identifier);
@if ( Cluster::is_enabled() && Cluster::local_node_type() != Cluster::MANAGER )
event Notice::manager_begin_suppression(n$ts, n$suppress_for, n$note, n$identifier);
@endif
}
}
@@ -552,7 +552,7 @@ event Notice::begin_suppression(ts: time, suppress_for: interval, note: Type,
suppressing[note, identifier] = suppress_until;
}
@if ( Cluster::is_enabled() && Cluster::local_node_type() == Cluster::MANAGER ) &analyze
@if ( Cluster::is_enabled() && Cluster::local_node_type() == Cluster::MANAGER )
event zeek_init()
{
Broker::auto_publish(Cluster::worker_topic, Notice::begin_suppression);
@@ -566,7 +566,7 @@ event Notice::manager_begin_suppression(ts: time, suppress_for: interval, note:
}
@endif
@if ( Cluster::is_enabled() && Cluster::local_node_type() != Cluster::MANAGER ) &analyze
@if ( Cluster::is_enabled() && Cluster::local_node_type() != Cluster::MANAGER )
event zeek_init()
{
Broker::auto_publish(Cluster::manager_topic, Notice::manager_begin_suppression);
@@ -644,14 +644,13 @@ function apply_policy(n: Notice::Info)
if ( ! n?$ts )
n$ts = network_time();
if ( Cluster::is_enabled() )
{
if ( ! n?$peer_name )
n$peer_name = Cluster::node;
@if ( Cluster::is_enabled() )
if ( ! n?$peer_name )
n$peer_name = Cluster::node;
if ( ! n?$peer_descr )
n$peer_descr = Cluster::node;
}
if ( ! n?$peer_descr )
n$peer_descr = Cluster::node;
@endif
if ( n?$f )
populate_file_info(n$f, n);

@@ -13,7 +13,7 @@ export {
global cluster_flow_clear: event(name: string);
}
@if ( Cluster::local_node_type() != Cluster::MANAGER ) &analyze
@if ( Cluster::local_node_type() != Cluster::MANAGER )
# Workers need ability to forward commands to manager.
event zeek_init()
{
@@ -49,7 +49,7 @@ function flow_clear(controller: Controller): bool
return T;
}
@if ( Cluster::local_node_type() == Cluster::MANAGER ) &analyze
@if ( Cluster::local_node_type() == Cluster::MANAGER )
event OpenFlow::cluster_flow_mod(name: string, match: ofp_match, flow_mod: ofp_flow_mod)
{
if ( name !in name_to_controller )

@@ -544,10 +544,11 @@ function found(id: conn_id, info: Info): bool
return F;
}
if ( Cluster::is_enabled() )
@if ( Cluster::is_enabled() )
Cluster::publish_hrw(Cluster::proxy_pool, info$host, Software::new, info);
else
@else
event Software::new(info);
@endif
return T;
}

@@ -59,7 +59,7 @@ export {
# intermediate updates so they don't overwhelm the manager.
global recent_global_view_keys: set[string, Key] &create_expire=1min;
@if ( Cluster::local_node_type() != Cluster::MANAGER ) &analyze
@if ( Cluster::local_node_type() != Cluster::MANAGER )
event zeek_init() &priority=100
{
@@ -207,7 +207,7 @@ function request_key(ss_name: string, key: Key): Result
@endif
@if ( Cluster::local_node_type() == Cluster::MANAGER ) &analyze
@if ( Cluster::local_node_type() == Cluster::MANAGER )
event zeek_init() &priority=100
{

@@ -12,7 +12,7 @@
redef Broker::metrics_export_endpoint_name = Cluster::node;
# The manager opens port 9911 and imports metrics from all nodes by default.
@if ( Cluster::local_node_type() == Cluster::MANAGER ) &analyze
@if ( Cluster::local_node_type() == Cluster::MANAGER )
redef Broker::metrics_port = 9911/tcp;
redef Broker::metrics_import_topics = vector("zeek/cluster/metrics/");