Mirror of https://github.com/zeek/zeek.git
change base scripts to use run-time ifs or @if ... &analyze
parent e749638380
commit 890010915a
30 changed files with 263 additions and 253 deletions
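
The diff below applies one of two patterns. Where the guarded code consists of declarations (redefs, event handlers), it keeps the parse-time @if but tags it &analyze; where the guard sits inside a function body, it becomes an ordinary run-time if. A minimal sketch of both patterns follows, assuming &analyze means the conditional block is still analyzed (e.g. for script optimization) even on nodes where the condition is false at parse time; the Demo module and describe_node helper are hypothetical, while the Cluster predicates and the attribute itself come from the hunks below.

# Hypothetical Demo module; a minimal sketch of the two patterns.
module Demo;

# Pattern 1: keep the parse-time conditional, but tag it &analyze so the
# block is still analyzed on nodes where the condition is false.
@if ( Cluster::local_node_type() == Cluster::MANAGER ) &analyze
event zeek_init()
	{
	print "manager-only setup";
	}
@endif

# Pattern 2: inside function bodies, test at run time instead, so a
# single body is compiled for every node type.
function describe_node(): string
	{
	if ( Cluster::is_enabled() )
		return Cluster::node;

	return "standalone";
	}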
@@ -18,11 +18,11 @@ export {
 # If we are not the manager, disable automatically generating masters. We will attach
 # clones instead.
-@if ( Cluster::is_enabled() && Cluster::local_node_type() != Cluster::MANAGER )
+@if ( Cluster::is_enabled() && Cluster::local_node_type() != Cluster::MANAGER ) &analyze
 redef Broker::table_store_master = F;
 @endif

-@if ( Broker::table_store_master )
+@if ( Broker::table_store_master ) &analyze

 global broker_backed_ids: set[string];

@@ -347,7 +347,7 @@ function nodeid_topic(id: string): string
 	return nodeid_topic_prefix + id + "/";
 	}

-@if ( Cluster::is_enabled() )
+@if ( Cluster::is_enabled() ) &analyze

 event Cluster::hello(name: string, id: string) &priority=10
 	{

@@ -383,9 +383,6 @@ event Cluster::hello(name: string, id: string) &priority=10

 event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) &priority=10
 	{
-	if ( ! Cluster::is_enabled() )
-		return;
-
 	local e = Broker::make_event(Cluster::hello, node, Broker::node_id());
 	Broker::publish(nodeid_topic(endpoint$id), e);
 	}

@@ -64,7 +64,7 @@ function archiver_rotation_format_func(ri: Log::RotationFmtInfo): Log::RotationP
 	return rval;
 	}

-@if ( Supervisor::is_supervised() )
+@if ( Supervisor::is_supervised() ) &analyze

 redef Log::default_rotation_dir = "log-queue";

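As in the hunk above, a redef is a declaration, not a statement, so it cannot move inside a run-time if; such sites keep @if and only gain &analyze. A short sketch of the distinction: the redef is taken from the hunk above, while rotation_dir is a hypothetical helper for illustration only.

# Declarations such as redef must stay behind a parse-time conditional:
@if ( Supervisor::is_supervised() ) &analyze
redef Log::default_rotation_dir = "log-queue";
@endif

# Statements, by contrast, can branch at run time (hypothetical helper):
function rotation_dir(): string
	{
	if ( Supervisor::is_supervised() )
		return "log-queue";

	return Log::default_rotation_dir;
	}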
@@ -19,7 +19,7 @@ redef Log::enable_remote_logging = T;
 ## Log rotation interval.
 redef Log::default_rotation_interval = 24 hrs;

-@if ( ! Supervisor::is_supervised() )
+@if ( ! Supervisor::is_supervised() ) &analyze
 ## Use the cluster's delete-log script.
 redef Log::default_rotation_postprocessor_cmd = "delete-log";
 @endif

@@ -13,7 +13,7 @@ redef Log::enable_remote_logging = T;

 redef Log::default_rotation_interval = 24hrs;

-@if ( ! Supervisor::is_supervised() )
+@if ( ! Supervisor::is_supervised() ) &analyze
 ## Use the cluster's delete-log script.
 redef Log::default_rotation_postprocessor_cmd = "delete-log";
 @endif

@@ -12,7 +12,7 @@ redef Log::enable_remote_logging = T;

 redef Log::default_rotation_interval = 24hrs;

-@if ( ! Supervisor::is_supervised() )
+@if ( ! Supervisor::is_supervised() ) &analyze
 ## Use the cluster's delete-log script.
 redef Log::default_rotation_postprocessor_cmd = "delete-log";
 @endif

@@ -47,7 +47,6 @@ export {
 	global set_value: function(ID: string, val: any, location: string &default = ""): bool;
 }

-@if ( Cluster::is_enabled() )
 type OptionCacheValue: record {
 	val: any;
 	location: string;

@@ -57,18 +56,21 @@ global option_cache: table[string] of OptionCacheValue;

 global Config::cluster_set_option: event(ID: string, val: any, location: string);

-function broadcast_option(ID: string, val: any, location: string) &is_used
+@if ( Cluster::is_enabled() ) &analyze
+
+function broadcast_option(ID: string, val: any, location: string)
 	{
 	for ( topic in Cluster::broadcast_topics )
 		Broker::publish(topic, Config::cluster_set_option, ID, val, location);
 	}

-event Config::cluster_set_option(ID: string, val: any, location: string)
+event Config::cluster_set_option(ID: string, val: any, location: string) &is_used
 	{
-@if ( Cluster::local_node_type() == Cluster::MANAGER )
-	option_cache[ID] = OptionCacheValue($val=val, $location=location);
-	broadcast_option(ID, val, location);
-@endif
+	if ( Cluster::local_node_type() == Cluster::MANAGER )
+		{
+		option_cache[ID] = OptionCacheValue($val=val, $location=location);
+		broadcast_option(ID, val, location);
+		}

 	Option::set(ID, val, location);
 	}

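Note how the hunk above also moves &is_used from broadcast_option to the Config::cluster_set_option handler. A plausible reading: now that the handler body is analyzed on every node type, and it is only ever invoked via a remote Broker::publish, it can look unused to local analysis; &is_used suppresses that warning. A trimmed sketch of the surviving pattern:

# &is_used: silence unused-identifier warnings for handlers that are
# only triggered by remote publishes (trimmed from the hunk above).
event Config::cluster_set_option(ID: string, val: any, location: string) &is_used
	{
	Option::set(ID, val, location);
	}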
@@ -85,13 +87,14 @@ function set_value(ID: string, val: any, location: string &default = ""): bool
 	if ( ! Option::set(ID, val, location) )
 		return F;

-@if ( Cluster::local_node_type() == Cluster::MANAGER )
-	option_cache[ID] = OptionCacheValue($val=val, $location=location);
-	broadcast_option(ID, val, location);
-@else
-	Broker::publish(Cluster::manager_topic, Config::cluster_set_option,
-	                ID, val, location);
-@endif
+	if ( Cluster::local_node_type() == Cluster::MANAGER )
+		{
+		option_cache[ID] = OptionCacheValue($val=val, $location=location);
+		broadcast_option(ID, val, location);
+		}
+	else
+		Broker::publish(Cluster::manager_topic, Config::cluster_set_option,
+		                ID, val, location);

 	return T;
 	}

@@ -102,7 +105,7 @@ function set_value(ID: string, val: any, location: string &default = ""): bool
 	}
 @endif # Cluster::is_enabled

-@if ( Cluster::is_enabled() && Cluster::local_node_type() == Cluster::MANAGER )
+@if ( Cluster::is_enabled() && Cluster::local_node_type() == Cluster::MANAGER ) &analyze
 # Handling of new worker nodes.
 event Cluster::node_up(name: string, id: string) &priority=-10
 	{

@@ -156,10 +159,9 @@ event zeek_init() &priority=10
 	Log::create_stream(LOG, [$columns=Info, $ev=log_config, $path="config", $policy=log_policy]);

 	# Limit logging to the manager - everyone else just feeds off it.
-@if ( !Cluster::is_enabled() || Cluster::local_node_type() == Cluster::MANAGER )
-	# Iterate over all existing options and add ourselves as change handlers
-	# with a low priority so that we can log the changes.
-	for ( opt in global_options() )
-		Option::set_change_handler(opt, config_option_changed, -100);
-@endif
+	if ( !Cluster::is_enabled() || Cluster::local_node_type() == Cluster::MANAGER )
+		# Iterate over all existing options and add ourselves as change handlers
+		# with a low priority so that we can log the changes.
+		for ( opt in global_options() )
+			Option::set_change_handler(opt, config_option_changed, -100);
 	}

@@ -17,11 +17,11 @@ global insert_indicator: event(item: Item) &is_used;
 const send_store_on_node_up = T &redef;

 # If this process is not a manager process, we don't want the full metadata.
-@if ( Cluster::local_node_type() != Cluster::MANAGER )
+@if ( Cluster::local_node_type() != Cluster::MANAGER ) &analyze
 redef have_full_data = F;
 @endif

-@if ( Cluster::local_node_type() == Cluster::MANAGER )
+@if ( Cluster::local_node_type() == Cluster::MANAGER ) &analyze
 event zeek_init()
 	{
 	Broker::auto_publish(Cluster::worker_topic, remove_indicator);

@@ -73,7 +73,7 @@ event Intel::match_remote(s: Seen) &priority=5
 	}
 @endif

-@if ( Cluster::local_node_type() == Cluster::WORKER )
+@if ( Cluster::local_node_type() == Cluster::WORKER ) &analyze
 event zeek_init()
 	{
 	Broker::auto_publish(Cluster::manager_topic, match_remote);

@@ -94,7 +94,7 @@ event Intel::insert_indicator(item: Intel::Item) &priority=5
 	}
 @endif

-@if ( Cluster::local_node_type() == Cluster::PROXY )
+@if ( Cluster::local_node_type() == Cluster::PROXY ) &analyze
 event Intel::insert_indicator(item: Intel::Item) &priority=5
 	{
 	# Just forwarding from manager to workers.

@@ -16,7 +16,7 @@ export {
 	global cluster_netcontrol_delete_rule: event(id: string, reason: string);
 }

-@if ( Cluster::local_node_type() == Cluster::MANAGER )
+@if ( Cluster::local_node_type() == Cluster::MANAGER ) &analyze
 event zeek_init()
 	{
 	Broker::auto_publish(Cluster::worker_topic, NetControl::rule_added);

@@ -93,7 +93,7 @@ function remove_rule(id: string, reason: string &default="") : bool
 	}
 }

-@if ( Cluster::local_node_type() == Cluster::MANAGER )
+@if ( Cluster::local_node_type() == Cluster::MANAGER ) &analyze
 event NetControl::cluster_netcontrol_delete_rule(id: string, reason: string)
 	{
 	delete_rule_impl(id, reason);

@@ -147,7 +147,7 @@ event rule_error(r: Rule, p: PluginState, msg: string) &priority=-5
 @endif

 # Workers use the events to keep track in their local state tables
-@if ( Cluster::local_node_type() != Cluster::MANAGER )
+@if ( Cluster::local_node_type() != Cluster::MANAGER ) &analyze

 event rule_new(r: Rule) &priority=5
 	{

@@ -153,16 +153,17 @@ function pretty_print_alarm(out: file, n: Info)
 	{
 	local pdescr = "";

-@if ( Cluster::is_enabled() )
-	pdescr = "local";
+	if ( Cluster::is_enabled() )
+		{
+		pdescr = "local";

-	if ( n?$peer_descr )
-		pdescr = n$peer_descr;
-	else if ( n?$peer_name )
-		pdescr = n$peer_name;
+		if ( n?$peer_descr )
+			pdescr = n$peer_descr;
+		else if ( n?$peer_name )
+			pdescr = n$peer_name;

-	pdescr = fmt("<%s> ", pdescr);
-@endif
+		pdescr = fmt("<%s> ", pdescr);
+		}

 	local msg = fmt( "%s%s", pdescr, n$msg);

@@ -539,9 +539,9 @@ hook Notice::notice(n: Notice::Info) &priority=-5
 		{
 		event Notice::begin_suppression(n$ts, n$suppress_for, n$note, n$identifier);
 		suppressing[n$note, n$identifier] = n$ts + n$suppress_for;
-@if ( Cluster::is_enabled() && Cluster::local_node_type() != Cluster::MANAGER )
-		event Notice::manager_begin_suppression(n$ts, n$suppress_for, n$note, n$identifier);
-@endif
+
+		if ( Cluster::is_enabled() && Cluster::local_node_type() != Cluster::MANAGER )
+			event Notice::manager_begin_suppression(n$ts, n$suppress_for, n$note, n$identifier);
 		}
 	}

@@ -552,7 +552,7 @@ event Notice::begin_suppression(ts: time, suppress_for: interval, note: Type,
 	suppressing[note, identifier] = suppress_until;
 	}

-@if ( Cluster::is_enabled() && Cluster::local_node_type() == Cluster::MANAGER )
+@if ( Cluster::is_enabled() && Cluster::local_node_type() == Cluster::MANAGER ) &analyze
 event zeek_init()
 	{
 	Broker::auto_publish(Cluster::worker_topic, Notice::begin_suppression);

@@ -566,7 +566,7 @@ event Notice::manager_begin_suppression(ts: time, suppress_for: interval, note:
 	}
 @endif

-@if ( Cluster::is_enabled() && Cluster::local_node_type() != Cluster::MANAGER )
+@if ( Cluster::is_enabled() && Cluster::local_node_type() != Cluster::MANAGER ) &analyze
 event zeek_init()
 	{
 	Broker::auto_publish(Cluster::manager_topic, Notice::manager_begin_suppression);

@@ -644,13 +644,14 @@ function apply_policy(n: Notice::Info)
 	if ( ! n?$ts )
 		n$ts = network_time();

-@if ( Cluster::is_enabled() )
-	if ( ! n?$peer_name )
-		n$peer_name = Cluster::node;
+	if ( Cluster::is_enabled() )
+		{
+		if ( ! n?$peer_name )
+			n$peer_name = Cluster::node;

-	if ( ! n?$peer_descr )
-		n$peer_descr = Cluster::node;
-@endif
+		if ( ! n?$peer_descr )
+			n$peer_descr = Cluster::node;
+		}

 	if ( n?$f )
 		populate_file_info(n$f, n);

@@ -13,7 +13,7 @@ export {
 	global cluster_flow_clear: event(name: string);
 }

-@if ( Cluster::local_node_type() != Cluster::MANAGER )
+@if ( Cluster::local_node_type() != Cluster::MANAGER ) &analyze
 # Workers need ability to forward commands to manager.
 event zeek_init()
 	{

@@ -49,7 +49,7 @@ function flow_clear(controller: Controller): bool
 	return T;
 	}

-@if ( Cluster::local_node_type() == Cluster::MANAGER )
+@if ( Cluster::local_node_type() == Cluster::MANAGER ) &analyze
 event OpenFlow::cluster_flow_mod(name: string, match: ofp_match, flow_mod: ofp_flow_mod)
 	{
 	if ( name !in name_to_controller )

@@ -544,11 +544,10 @@ function found(id: conn_id, info: Info): bool
 		return F;
 		}

-@if ( Cluster::is_enabled() )
+	if ( Cluster::is_enabled() )
 		Cluster::publish_hrw(Cluster::proxy_pool, info$host, Software::new, info);
-@else
+	else
 		event Software::new(info);
-@endif

 	return T;
 	}

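The hunk above converts a full @if/@else/@endif into a run-time if/else. For context, Cluster::publish_hrw distributes events across a node pool (here the proxies) by rendezvous-hashing the key, so the same info$host is consistently handled by the same proxy; in standalone mode the event is simply raised locally. A minimal sketch of the same dispatch shape, with a hypothetical handle_host event and dispatch function:

# Hypothetical event; mirrors the dispatch shape in the hunk above.
global handle_host: event(h: addr);

function dispatch(h: addr)
	{
	if ( Cluster::is_enabled() )
		# Rendezvous-hash h across the proxy pool so a given host
		# is always processed by the same proxy.
		Cluster::publish_hrw(Cluster::proxy_pool, h, handle_host, h);
	else
		event handle_host(h);
	}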
@@ -59,7 +59,7 @@ export {
 	# intermediate updates so they don't overwhelm the manager.
 	global recent_global_view_keys: set[string, Key] &create_expire=1min;

-@if ( Cluster::local_node_type() != Cluster::MANAGER )
+@if ( Cluster::local_node_type() != Cluster::MANAGER ) &analyze

 event zeek_init() &priority=100
 	{

@@ -207,7 +207,7 @@ function request_key(ss_name: string, key: Key): Result
 @endif


-@if ( Cluster::local_node_type() == Cluster::MANAGER )
+@if ( Cluster::local_node_type() == Cluster::MANAGER ) &analyze

 event zeek_init() &priority=100
 	{

@@ -12,7 +12,7 @@
 redef Broker::metrics_export_endpoint_name = Cluster::node;

 # The manager opens port 9911 and imports metrics from all nodes by default.
-@if ( Cluster::local_node_type() == Cluster::MANAGER )
+@if ( Cluster::local_node_type() == Cluster::MANAGER ) &analyze
 redef Broker::metrics_port = 9911/tcp;
 redef Broker::metrics_import_topics = vector("zeek/cluster/metrics/");