Mirror of https://github.com/zeek/zeek.git, synced 2025-10-02 06:38:20 +00:00
change base scripts to use run-time if's or @if ... &analyze
commit 890010915a
parent e749638380
30 changed files with 263 additions and 253 deletions
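The commit applies two patterns throughout the base scripts. Where a parse-time `@if` directive merely guarded ordinary statements, it becomes a run-time `if`, so the code is always parsed and type-checked on every node. Where the directive has to stay (around `redef`s or whole handler definitions), it gains the new `&analyze` attribute, which keeps the guarded code visible to script analysis even when the condition is false. A minimal sketch of the three forms (the body is illustrative, not taken from the diff):

    # Before: the body does not exist at all when the condition is false.
    @if ( Cluster::is_enabled() )
    event zeek_init()
        {
        print "cluster setup";
        }
    @endif

    # After, pattern 1: always compiled, decided at run time.
    event zeek_init()
        {
        if ( Cluster::is_enabled() )
            print "cluster setup";
        }

    # After, pattern 2: @if stays, but the body is still analyzed everywhere.
    @if ( Cluster::is_enabled() ) &analyze
    event zeek_init()
        {
        print "cluster setup";
        }
    @endif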
@@ -143,10 +143,8 @@ event zeek_init() &priority=5
     Files::register_for_mime_type(Files::ANALYZER_SHA256, "application/x-x509-ca-cert");
     Files::register_for_mime_type(Files::ANALYZER_SHA256, "application/pkix-cert");
 
-@if ( Cluster::is_enabled() )
-    if ( known_log_certs_use_broker )
+    if ( Cluster::is_enabled() && known_log_certs_use_broker )
         known_log_certs = known_log_certs_with_broker;
-@endif
     }
 
 hook Files::log_policy(rec: Files::Info, id: Log::ID, filter: Log::Filter) &priority=5
@@ -18,11 +18,11 @@ export {
 
 # If we are not the manager, disable automatically generating masters. We will attach
 # clones instead.
-@if ( Cluster::is_enabled() && Cluster::local_node_type() != Cluster::MANAGER )
+@if ( Cluster::is_enabled() && Cluster::local_node_type() != Cluster::MANAGER ) &analyze
 redef Broker::table_store_master = F;
 @endif
 
-@if ( Broker::table_store_master )
+@if ( Broker::table_store_master ) &analyze
 
 global broker_backed_ids: set[string];
 
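A `redef` is a parse-time construct, so the first `@if` here cannot become a run-time `if`; it keeps the directive and adds `&analyze` instead. The second `@if` tests `Broker::table_store_master`, whose value the preceding block may itself have changed, another reason it must stay at parse time. A sketch of the constraint, with an illustrative option:

    const use_fast_path = T &redef;

    # Fine as run-time code:
    event zeek_init()
        {
        if ( use_fast_path )
            print "fast path";
        }

    # Not expressible at run time; a redef needs @if, and &analyze keeps
    # the branch type-checked on nodes where the condition is false:
    @if ( Cluster::local_node_type() != Cluster::MANAGER ) &analyze
    redef use_fast_path = F;
    @endif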
@@ -347,7 +347,7 @@ function nodeid_topic(id: string): string
     return nodeid_topic_prefix + id + "/";
     }
 
-@if ( Cluster::is_enabled() )
+@if ( Cluster::is_enabled() ) &analyze
 
 event Cluster::hello(name: string, id: string) &priority=10
     {
@@ -383,9 +383,6 @@ event Cluster::hello(name: string, id: string) &priority=10
 
 event Broker::peer_added(endpoint: Broker::EndpointInfo, msg: string) &priority=10
     {
-    if ( ! Cluster::is_enabled() )
-        return;
-
     local e = Broker::make_event(Cluster::hello, node, Broker::node_id());
     Broker::publish(nodeid_topic(endpoint$id), e);
     }
@@ -64,7 +64,7 @@ function archiver_rotation_format_func(ri: Log::RotationFmtInfo): Log::RotationP
     return rval;
     }
 
-@if ( Supervisor::is_supervised() )
+@if ( Supervisor::is_supervised() ) &analyze
 
 redef Log::default_rotation_dir = "log-queue";
 
@@ -19,7 +19,7 @@ redef Log::enable_remote_logging = T;
 ## Log rotation interval.
 redef Log::default_rotation_interval = 24 hrs;
 
-@if ( ! Supervisor::is_supervised() )
+@if ( ! Supervisor::is_supervised() ) &analyze
 ## Use the cluster's delete-log script.
 redef Log::default_rotation_postprocessor_cmd = "delete-log";
 @endif
@@ -13,7 +13,7 @@ redef Log::enable_remote_logging = T;
 
 redef Log::default_rotation_interval = 24hrs;
 
-@if ( ! Supervisor::is_supervised() )
+@if ( ! Supervisor::is_supervised() ) &analyze
 ## Use the cluster's delete-log script.
 redef Log::default_rotation_postprocessor_cmd = "delete-log";
 @endif
@@ -12,7 +12,7 @@ redef Log::enable_remote_logging = T;
 
 redef Log::default_rotation_interval = 24hrs;
 
-@if ( ! Supervisor::is_supervised() )
+@if ( ! Supervisor::is_supervised() ) &analyze
 ## Use the cluster's delete-log script.
 redef Log::default_rotation_postprocessor_cmd = "delete-log";
 @endif
@@ -47,7 +47,6 @@ export {
     global set_value: function(ID: string, val: any, location: string &default = ""): bool;
 }
 
-@if ( Cluster::is_enabled() )
 type OptionCacheValue: record {
     val: any;
     location: string;
@@ -57,18 +56,21 @@ global option_cache: table[string] of OptionCacheValue;
 
 global Config::cluster_set_option: event(ID: string, val: any, location: string);
 
-function broadcast_option(ID: string, val: any, location: string) &is_used
+@if ( Cluster::is_enabled() ) &analyze
+
+function broadcast_option(ID: string, val: any, location: string)
     {
     for ( topic in Cluster::broadcast_topics )
         Broker::publish(topic, Config::cluster_set_option, ID, val, location);
     }
 
-event Config::cluster_set_option(ID: string, val: any, location: string)
+event Config::cluster_set_option(ID: string, val: any, location: string) &is_used
     {
-@if ( Cluster::local_node_type() == Cluster::MANAGER )
-    option_cache[ID] = OptionCacheValue($val=val, $location=location);
-    broadcast_option(ID, val, location);
-@endif
+    if ( Cluster::local_node_type() == Cluster::MANAGER )
+        {
+        option_cache[ID] = OptionCacheValue($val=val, $location=location);
+        broadcast_option(ID, val, location);
+        }
 
     Option::set(ID, val, location);
     }
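Note the `&is_used` swap in this hunk. With the surrounding `@if ( Cluster::is_enabled() ) &analyze`, the call to `broadcast_option` is now always visible, so the function no longer needs the attribute; the `Config::cluster_set_option` handler gains it instead, because that event only ever arrives via `Broker::publish` from another node and has no locally visible generation site. A minimal sketch (module and event name illustrative):

    module Example;

    global remote_ping: event(msg: string);

    # No local call site: peers generate this via Broker::publish(),
    # so &is_used suppresses the "handler never invoked" warning.
    event Example::remote_ping(msg: string) &is_used
        {
        print msg;
        }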
@@ -85,13 +87,14 @@ function set_value(ID: string, val: any, location: string &default = ""): bool
     if ( ! Option::set(ID, val, location) )
         return F;
 
-@if ( Cluster::local_node_type() == Cluster::MANAGER )
-    option_cache[ID] = OptionCacheValue($val=val, $location=location);
-    broadcast_option(ID, val, location);
-@else
-    Broker::publish(Cluster::manager_topic, Config::cluster_set_option,
-                    ID, val, location);
-@endif
+    if ( Cluster::local_node_type() == Cluster::MANAGER )
+        {
+        option_cache[ID] = OptionCacheValue($val=val, $location=location);
+        broadcast_option(ID, val, location);
+        }
+    else
+        Broker::publish(Cluster::manager_topic, Config::cluster_set_option,
+                        ID, val, location);
 
     return T;
     }
@@ -102,7 +105,7 @@ function set_value(ID: string, val: any, location: string &default = ""): bool
     }
 @endif # Cluster::is_enabled
 
-@if ( Cluster::is_enabled() && Cluster::local_node_type() == Cluster::MANAGER )
+@if ( Cluster::is_enabled() && Cluster::local_node_type() == Cluster::MANAGER ) &analyze
 # Handling of new worker nodes.
 event Cluster::node_up(name: string, id: string) &priority=-10
     {
@@ -156,10 +159,9 @@ event zeek_init() &priority=10
     Log::create_stream(LOG, [$columns=Info, $ev=log_config, $path="config", $policy=log_policy]);
 
     # Limit logging to the manager - everyone else just feeds off it.
-@if ( !Cluster::is_enabled() || Cluster::local_node_type() == Cluster::MANAGER )
-    # Iterate over all existing options and add ourselves as change handlers
-    # with a low priority so that we can log the changes.
-    for ( opt in global_options() )
-        Option::set_change_handler(opt, config_option_changed, -100);
-@endif
+    if ( !Cluster::is_enabled() || Cluster::local_node_type() == Cluster::MANAGER )
+        # Iterate over all existing options and add ourselves as change handlers
+        # with a low priority so that we can log the changes.
+        for ( opt in global_options() )
+            Option::set_change_handler(opt, config_option_changed, -100);
     }
@@ -17,11 +17,11 @@ global insert_indicator: event(item: Item) &is_used;
 const send_store_on_node_up = T &redef;
 
 # If this process is not a manager process, we don't want the full metadata.
-@if ( Cluster::local_node_type() != Cluster::MANAGER )
+@if ( Cluster::local_node_type() != Cluster::MANAGER ) &analyze
 redef have_full_data = F;
 @endif
 
-@if ( Cluster::local_node_type() == Cluster::MANAGER )
+@if ( Cluster::local_node_type() == Cluster::MANAGER ) &analyze
 event zeek_init()
     {
     Broker::auto_publish(Cluster::worker_topic, remove_indicator);
@@ -73,7 +73,7 @@ event Intel::match_remote(s: Seen) &priority=5
     }
 @endif
 
-@if ( Cluster::local_node_type() == Cluster::WORKER )
+@if ( Cluster::local_node_type() == Cluster::WORKER ) &analyze
 event zeek_init()
     {
     Broker::auto_publish(Cluster::manager_topic, match_remote);
@@ -94,7 +94,7 @@ event Intel::insert_indicator(item: Intel::Item) &priority=5
     }
 @endif
 
-@if ( Cluster::local_node_type() == Cluster::PROXY )
+@if ( Cluster::local_node_type() == Cluster::PROXY ) &analyze
 event Intel::insert_indicator(item: Intel::Item) &priority=5
     {
     # Just forwarding from manager to workers.
@@ -16,7 +16,7 @@ export {
     global cluster_netcontrol_delete_rule: event(id: string, reason: string);
 }
 
-@if ( Cluster::local_node_type() == Cluster::MANAGER )
+@if ( Cluster::local_node_type() == Cluster::MANAGER ) &analyze
 event zeek_init()
     {
     Broker::auto_publish(Cluster::worker_topic, NetControl::rule_added);
@@ -93,7 +93,7 @@ function remove_rule(id: string, reason: string &default="") : bool
         }
     }
 
-@if ( Cluster::local_node_type() == Cluster::MANAGER )
+@if ( Cluster::local_node_type() == Cluster::MANAGER ) &analyze
 event NetControl::cluster_netcontrol_delete_rule(id: string, reason: string)
     {
     delete_rule_impl(id, reason);
@@ -147,7 +147,7 @@ event rule_error(r: Rule, p: PluginState, msg: string) &priority=-5
 @endif
 
 # Workers use the events to keep track in their local state tables
-@if ( Cluster::local_node_type() != Cluster::MANAGER )
+@if ( Cluster::local_node_type() != Cluster::MANAGER ) &analyze
 
 event rule_new(r: Rule) &priority=5
     {
@@ -153,16 +153,17 @@ function pretty_print_alarm(out: file, n: Info)
     {
     local pdescr = "";
 
-@if ( Cluster::is_enabled() )
-    pdescr = "local";
+    if ( Cluster::is_enabled() )
+        {
+        pdescr = "local";
 
-    if ( n?$peer_descr )
-        pdescr = n$peer_descr;
-    else if ( n?$peer_name )
-        pdescr = n$peer_name;
+        if ( n?$peer_descr )
+            pdescr = n$peer_descr;
+        else if ( n?$peer_name )
+            pdescr = n$peer_name;
 
-    pdescr = fmt("<%s> ", pdescr);
-@endif
+        pdescr = fmt("<%s> ", pdescr);
+        }
 
     local msg = fmt( "%s%s", pdescr, n$msg);
@@ -539,9 +539,9 @@ hook Notice::notice(n: Notice::Info) &priority=-5
         {
         event Notice::begin_suppression(n$ts, n$suppress_for, n$note, n$identifier);
         suppressing[n$note, n$identifier] = n$ts + n$suppress_for;
-@if ( Cluster::is_enabled() && Cluster::local_node_type() != Cluster::MANAGER )
-        event Notice::manager_begin_suppression(n$ts, n$suppress_for, n$note, n$identifier);
-@endif
+
+        if ( Cluster::is_enabled() && Cluster::local_node_type() != Cluster::MANAGER )
+            event Notice::manager_begin_suppression(n$ts, n$suppress_for, n$note, n$identifier);
         }
     }
@@ -552,7 +552,7 @@ event Notice::begin_suppression(ts: time, suppress_for: interval, note: Type,
     suppressing[note, identifier] = suppress_until;
     }
 
-@if ( Cluster::is_enabled() && Cluster::local_node_type() == Cluster::MANAGER )
+@if ( Cluster::is_enabled() && Cluster::local_node_type() == Cluster::MANAGER ) &analyze
 event zeek_init()
     {
     Broker::auto_publish(Cluster::worker_topic, Notice::begin_suppression);
@@ -566,7 +566,7 @@ event Notice::manager_begin_suppression(ts: time, suppress_for: interval, note:
     }
 @endif
 
-@if ( Cluster::is_enabled() && Cluster::local_node_type() != Cluster::MANAGER )
+@if ( Cluster::is_enabled() && Cluster::local_node_type() != Cluster::MANAGER ) &analyze
 event zeek_init()
     {
     Broker::auto_publish(Cluster::manager_topic, Notice::manager_begin_suppression);
@@ -644,13 +644,14 @@ function apply_policy(n: Notice::Info)
     if ( ! n?$ts )
         n$ts = network_time();
 
-@if ( Cluster::is_enabled() )
-    if ( ! n?$peer_name )
-        n$peer_name = Cluster::node;
+    if ( Cluster::is_enabled() )
+        {
+        if ( ! n?$peer_name )
+            n$peer_name = Cluster::node;
 
-    if ( ! n?$peer_descr )
-        n$peer_descr = Cluster::node;
-@endif
+        if ( ! n?$peer_descr )
+            n$peer_descr = Cluster::node;
+        }
 
     if ( n?$f )
         populate_file_info(n$f, n);
@@ -13,7 +13,7 @@ export {
     global cluster_flow_clear: event(name: string);
 }
 
-@if ( Cluster::local_node_type() != Cluster::MANAGER )
+@if ( Cluster::local_node_type() != Cluster::MANAGER ) &analyze
 # Workers need ability to forward commands to manager.
 event zeek_init()
     {
@@ -49,7 +49,7 @@ function flow_clear(controller: Controller): bool
     return T;
     }
 
-@if ( Cluster::local_node_type() == Cluster::MANAGER )
+@if ( Cluster::local_node_type() == Cluster::MANAGER ) &analyze
 event OpenFlow::cluster_flow_mod(name: string, match: ofp_match, flow_mod: ofp_flow_mod)
     {
     if ( name !in name_to_controller )
@@ -544,11 +544,10 @@ function found(id: conn_id, info: Info): bool
         return F;
         }
 
-@if ( Cluster::is_enabled() )
+    if ( Cluster::is_enabled() )
         Cluster::publish_hrw(Cluster::proxy_pool, info$host, Software::new, info);
-@else
+    else
         event Software::new(info);
-@endif
 
     return T;
     }
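The Software framework's dispatch becomes a run-time branch between HRW distribution and a local event. `Cluster::publish_hrw` rendezvous-hashes the key (here `info$host`) across the proxy pool, so all updates for one host are serialized on the same proxy. The idiom in isolation (event and key names illustrative):

    global host_seen: event(h: addr);

    function dispatch(h: addr)
        {
        if ( Cluster::is_enabled() )
            # Same key -> same proxy; per-host state stays on one node.
            Cluster::publish_hrw(Cluster::proxy_pool, h, host_seen, h);
        else
            event host_seen(h);
        }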
@@ -59,7 +59,7 @@ export {
     # intermediate updates so they don't overwhelm the manager.
     global recent_global_view_keys: set[string, Key] &create_expire=1min;
 
-@if ( Cluster::local_node_type() != Cluster::MANAGER )
+@if ( Cluster::local_node_type() != Cluster::MANAGER ) &analyze
 
 event zeek_init() &priority=100
     {
@@ -207,7 +207,7 @@ function request_key(ss_name: string, key: Key): Result
 @endif
 
 
-@if ( Cluster::local_node_type() == Cluster::MANAGER )
+@if ( Cluster::local_node_type() == Cluster::MANAGER ) &analyze
 
 event zeek_init() &priority=100
     {
@@ -12,7 +12,7 @@
 redef Broker::metrics_export_endpoint_name = Cluster::node;
 
 # The manager opens port 9911 and imports metrics from all nodes by default.
-@if ( Cluster::local_node_type() == Cluster::MANAGER )
+@if ( Cluster::local_node_type() == Cluster::MANAGER ) &analyze
 redef Broker::metrics_port = 9911/tcp;
 redef Broker::metrics_import_topics = vector("zeek/cluster/metrics/");
 
@@ -28,7 +28,7 @@ function should_detect(): bool &is_used
     return F;
     }
 
-@if ( should_detect() )
+@if ( should_detect() ) &analyze
 
 global saw_tcp_conn_with_data: bool = F;
 global saw_a_tcp_conn: bool = F;
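An `@if` condition can be a call to a script function such as `should_detect()`, evaluated once while the script is loaded; the new `&analyze` keeps the globals beneath it type-checked even when detection is disabled. A self-contained sketch of a parse-time predicate (the environment switch is illustrative):

    function enabled_by_env(): bool
        {
        return getenv("MY_FEATURE") != "";
        }

    @if ( enabled_by_env() ) &analyze
    global feature_counter: count = 0;
    @endif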
@@ -134,7 +134,7 @@ event zeek_init() &priority=5
     Analyzer::register_for_ports(Analyzer::ANALYZER_DHCP, ports);
     }
 
-@if ( Cluster::is_enabled() )
+@if ( Cluster::is_enabled() ) &analyze
 event zeek_init()
     {
     Broker::auto_publish(Cluster::manager_topic, DHCP::aggregate_msgs);
@@ -180,7 +180,7 @@ global join_data: table[count] of Info = table()
 
 
 
-@if ( ! Cluster::is_enabled() || Cluster::local_node_type() == Cluster::MANAGER )
+@if ( ! Cluster::is_enabled() || Cluster::local_node_type() == Cluster::MANAGER ) &analyze
 # We are handling this event at priority 1000 because we really want
 # the DHCP::log_info global to be set correctly before a user might try
 # to access it.
@@ -216,27 +216,30 @@ function ftp_message(c: connection)
         delete s$data_channel;
     }
 
+const have_cluster = Cluster::is_enabled();
+const should_publish =
+    Cluster::local_node_type() == Cluster::PROXY ||
+    Cluster::local_node_type() == Cluster::MANAGER;
+
 event sync_add_expected_data(s: Info, chan: ExpectedDataChannel) &is_used
     {
-@if ( Cluster::local_node_type() == Cluster::PROXY ||
-      Cluster::local_node_type() == Cluster::MANAGER )
-    Broker::publish(Cluster::worker_topic, sync_add_expected_data, minimize_info(s), chan);
-@else
-    ftp_data_expected[chan$resp_h, chan$resp_p] = s;
-    Analyzer::schedule_analyzer(chan$orig_h, chan$resp_h, chan$resp_p,
-                                Analyzer::ANALYZER_FTP_DATA,
-                                5mins);
-@endif
+    if ( should_publish )
+        Broker::publish(Cluster::worker_topic, sync_add_expected_data, minimize_info(s), chan);
+    else
+        {
+        ftp_data_expected[chan$resp_h, chan$resp_p] = s;
+        Analyzer::schedule_analyzer(chan$orig_h, chan$resp_h, chan$resp_p,
+                                    Analyzer::ANALYZER_FTP_DATA,
+                                    5mins);
+        }
     }
 
 event sync_remove_expected_data(resp_h: addr, resp_p: port) &is_used
     {
-@if ( Cluster::local_node_type() == Cluster::PROXY ||
-      Cluster::local_node_type() == Cluster::MANAGER )
-    Broker::publish(Cluster::worker_topic, sync_remove_expected_data, resp_h, resp_p);
-@else
-    delete ftp_data_expected[resp_h, resp_p];
-@endif
+    if ( should_publish )
+        Broker::publish(Cluster::worker_topic, sync_remove_expected_data, resp_h, resp_p);
+    else
+        delete ftp_data_expected[resp_h, resp_p];
     }
 
 function add_expected_data_channel(s: Info, chan: ExpectedDataChannel)
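Rather than duplicating each handler under `@if`/`@else`, the FTP script hoists the node-type decision into module-scope constants. Global initializers run when the script loads, so `have_cluster` and `should_publish` are fixed for the process lifetime and each handler pays only a boolean test. The idiom, with an illustrative name:

    const am_worker = Cluster::is_enabled() &&
        Cluster::local_node_type() == Cluster::WORKER;

    event zeek_init()
        {
        if ( am_worker )
            print "worker-side setup";
        }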
@@ -247,9 +250,8 @@ function add_expected_data_channel(s: Info, chan: ExpectedDataChannel)
     Analyzer::schedule_analyzer(chan$orig_h, chan$resp_h, chan$resp_p,
                                 Analyzer::ANALYZER_FTP_DATA,
                                 5mins);
-@if ( Cluster::is_enabled() )
-    Broker::publish(ftp_relay_topic(), sync_add_expected_data, minimize_info(s), chan);
-@endif
+    if ( have_cluster )
+        Broker::publish(ftp_relay_topic(), sync_add_expected_data, minimize_info(s), chan);
     }
 
 event ftp_request(c: connection, command: string, arg: string) &priority=5
@@ -464,9 +466,8 @@ hook finalize_ftp_data(c: connection)
     if ( [c$id$resp_h, c$id$resp_p] in ftp_data_expected )
         {
         delete ftp_data_expected[c$id$resp_h, c$id$resp_p];
-@if ( Cluster::is_enabled() )
-        Broker::publish(ftp_relay_topic(), sync_remove_expected_data, c$id$resp_h, c$id$resp_p);
-@endif
+        if ( have_cluster )
+            Broker::publish(ftp_relay_topic(), sync_remove_expected_data, c$id$resp_h, c$id$resp_p);
         }
     }
@@ -44,26 +44,29 @@ function dcc_relay_topic(): string &is_used
     return rval;
     }
 
+const have_cluster = Cluster::is_enabled();
+const should_publish =
+    Cluster::local_node_type() == Cluster::PROXY ||
+    Cluster::local_node_type() == Cluster::MANAGER;
+
 event dcc_transfer_add(host: addr, p: port, info: Info) &is_used
     {
-@if ( Cluster::local_node_type() == Cluster::PROXY ||
-      Cluster::local_node_type() == Cluster::MANAGER )
-    Broker::publish(Cluster::worker_topic, dcc_transfer_add, host, p, info);
-@else
-    dcc_expected_transfers[host, p] = info;
-    Analyzer::schedule_analyzer(0.0.0.0, host, p,
-                                Analyzer::ANALYZER_IRC_DATA, 5 min);
-@endif
+    if ( should_publish )
+        Broker::publish(Cluster::worker_topic, dcc_transfer_add, host, p, info);
+    else
+        {
+        dcc_expected_transfers[host, p] = info;
+        Analyzer::schedule_analyzer(0.0.0.0, host, p,
+                                    Analyzer::ANALYZER_IRC_DATA, 5 min);
+        }
     }
 
 event dcc_transfer_remove(host: addr, p: port) &is_used
     {
-@if ( Cluster::local_node_type() == Cluster::PROXY ||
-      Cluster::local_node_type() == Cluster::MANAGER )
-    Broker::publish(Cluster::worker_topic, dcc_transfer_remove, host, p);
-@else
-    delete dcc_expected_transfers[host, p];
-@endif
+    if ( should_publish )
+        Broker::publish(Cluster::worker_topic, dcc_transfer_remove, host, p);
+    else
+        delete dcc_expected_transfers[host, p];
     }
 
 function log_dcc(f: fa_file)
@@ -89,10 +92,9 @@ function log_dcc(f: fa_file)
 
             delete dcc_expected_transfers[cid$resp_h, cid$resp_p];
 
-@if ( Cluster::is_enabled() )
-            Broker::publish(dcc_relay_topic(), dcc_transfer_remove,
-                            cid$resp_h, cid$resp_p);
-@endif
+            if ( have_cluster )
+                Broker::publish(dcc_relay_topic(), dcc_transfer_remove,
+                                cid$resp_h, cid$resp_p);
             return;
             }
         }
@@ -117,9 +119,8 @@ event irc_dcc_message(c: connection, is_orig: bool,
     Analyzer::schedule_analyzer(0.0.0.0, address, p, Analyzer::ANALYZER_IRC_DATA, 5 min);
     dcc_expected_transfers[address, p] = c$irc;
 
-@if ( Cluster::is_enabled() )
-    Broker::publish(dcc_relay_topic(), dcc_transfer_add, address, p, c$irc);
-@endif
+    if ( have_cluster )
+        Broker::publish(dcc_relay_topic(), dcc_transfer_add, address, p, c$irc);
     }
 
 event scheduled_analyzer_applied(c: connection, a: Analyzer::Tag) &priority=10
@@ -138,9 +139,8 @@ hook finalize_irc_data(c: connection)
     {
     delete dcc_expected_transfers[c$id$resp_h, c$id$resp_p];
 
-@if ( Cluster::is_enabled() )
-    Broker::publish(dcc_relay_topic(), dcc_transfer_remove,
-                    c$id$resp_h, c$id$resp_p);
-@endif
+    if ( have_cluster )
+        Broker::publish(dcc_relay_topic(), dcc_transfer_remove,
+                        c$id$resp_h, c$id$resp_p);
     }
 }
@@ -8,7 +8,7 @@
 
 # For testing, keep persistent state local to the current working directory,
 # and disable log rotation.
-@if ( getenv("ZEEK_MANAGEMENT_TESTING") != "" )
+@if ( getenv("ZEEK_MANAGEMENT_TESTING") != "" ) &analyze
 
 redef Management::spool_dir = ".";
 redef Management::state_dir = ".";
@@ -21,7 +21,7 @@ redef Log::default_rotation_interval = 0 secs;
 # config with the Supervisor; see base/frameworks/cluster/nodes/logger.zeek.
 redef Log::default_rotation_dir = build_path(Management::get_spool_dir(), "log-queue");
 
-@if ( getenv("ZEEK_MANAGEMENT_NODE") != "" )
+@if ( getenv("ZEEK_MANAGEMENT_NODE") != "" ) &analyze
 
 # Management agents and controllers don't have loggers, nor their configuration,
 # so establish a similar one here:
@@ -198,21 +198,25 @@ function populate_log_record(ip: addr, bi: BlockInfo, action: CatchReleaseAction
     return log;
     }
 
+const have_cluster = Cluster::is_enabled();
+const is_mgr = have_cluster && Cluster::local_node_type() == Cluster::MANAGER;
+const is_not_mgr = have_cluster && Cluster::local_node_type() != Cluster::MANAGER;
+
+const single_enforcement_point = ! have_cluster || is_mgr;
+
 function per_block_interval(t: table[addr] of BlockInfo, idx: addr): interval
     {
     local remaining_time = t[idx]$watch_until - network_time();
     if ( remaining_time < 0secs )
         remaining_time = 0secs;
 
-@if ( ! Cluster::is_enabled() || ( Cluster::is_enabled() && Cluster::local_node_type() == Cluster::MANAGER ) )
-    if ( remaining_time == 0secs )
+    if ( single_enforcement_point && remaining_time == 0 secs )
         {
         local log = populate_log_record(idx, t[idx], FORGOTTEN);
         Log::write(CATCH_RELEASE, log);
 
         event NetControl::catch_release_forgotten(idx, t[idx]);
         }
-@endif
 
     return remaining_time;
     }
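The hoisted predicates give names to the three node roles the old inline conditions distinguished; `single_enforcement_point` is the old `! Cluster::is_enabled() || ( Cluster::is_enabled() && Cluster::local_node_type() == Cluster::MANAGER )` with the redundant second `Cluster::is_enabled()` dropped. Their values per role:

    # Role:                       standalone   manager   worker/proxy
    # have_cluster                     F          T           T
    # is_mgr                           F          T           F
    # is_not_mgr                       F          F           T
    # single_enforcement_point         T          T           F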
@@ -225,9 +229,9 @@ global blocks: table[addr] of BlockInfo = {}
     &expire_func=per_block_interval;
 
 
-@if ( Cluster::is_enabled() )
+@if ( have_cluster ) &analyze
 
-@if ( Cluster::local_node_type() == Cluster::MANAGER )
+@if ( is_mgr ) &analyze
 event zeek_init()
     {
     Broker::auto_publish(Cluster::worker_topic, NetControl::catch_release_block_new);
@@ -259,7 +263,7 @@ function cr_check_rule(r: Rule): bool
     return F;
     }
 
-@if ( ! Cluster::is_enabled() || ( Cluster::is_enabled() && Cluster::local_node_type() == Cluster::MANAGER ) )
+@if ( single_enforcement_point ) &analyze
 
 event rule_added(r: Rule, p: PluginState, msg: string)
     {
@@ -297,7 +301,7 @@ event rule_timeout(r: Rule, i: FlowInfo, p: PluginState)
 
 @endif
 
-@if ( Cluster::is_enabled() && Cluster::local_node_type() == Cluster::MANAGER )
+@if ( is_mgr ) &analyze
 event catch_release_add(a: addr, location: string)
     {
     drop_address_catch_release(a, location);
@@ -314,7 +318,7 @@ event catch_release_encountered(a: addr)
     }
 @endif
 
-@if ( Cluster::is_enabled() && Cluster::local_node_type() != Cluster::MANAGER )
+@if ( is_not_mgr ) &analyze
 event catch_release_block_new(a: addr, b: BlockInfo)
     {
     blocks[a] = b;
@@ -327,7 +331,7 @@ event catch_release_block_delete(a: addr)
     }
 @endif
 
-@if ( Cluster::is_enabled() && Cluster::local_node_type() == Cluster::MANAGER )
+@if ( is_mgr ) &analyze
 @endif
 
 function get_catch_release_info(a: addr): BlockInfo
@@ -360,46 +364,50 @@ function drop_address_catch_release(a: addr, location: string &default=""): Bloc
         bi = BlockInfo($watch_until=network_time()+catch_release_intervals[1], $current_interval=0, $current_block_id=r$id);
         if ( location != "" )
             bi$location = location;
-@if ( ! Cluster::is_enabled() || ( Cluster::is_enabled() && Cluster::local_node_type() == Cluster::MANAGER ) )
-        log = populate_log_record(a, bi, ADDED);
-        log$message = "Address already blocked outside of catch-and-release. Catch and release will monitor and only actively block if it appears in network traffic.";
-        Log::write(CATCH_RELEASE, log);
-        blocks[a] = bi;
-        event NetControl::catch_release_block_new(a, bi);
-@endif
-@if ( Cluster::is_enabled() && Cluster::local_node_type() != Cluster::MANAGER )
-        event NetControl::catch_release_add(a, location);
-@endif
+
+        if ( single_enforcement_point )
+            {
+            log = populate_log_record(a, bi, ADDED);
+            log$message = "Address already blocked outside of catch-and-release. Catch and release will monitor and only actively block if it appears in network traffic.";
+            Log::write(CATCH_RELEASE, log);
+            blocks[a] = bi;
+            event NetControl::catch_release_block_new(a, bi);
+            }
+
+        if ( is_not_mgr )
+            event NetControl::catch_release_add(a, location);
+
         return bi;
         }
 
     local block_interval = catch_release_intervals[0];
 
-@if ( ! Cluster::is_enabled() || ( Cluster::is_enabled() && Cluster::local_node_type() == Cluster::MANAGER ) )
-    local ret = drop_address(a, block_interval, location);
-
-    if ( ret != "" )
-        {
-        bi = BlockInfo($watch_until=network_time()+catch_release_intervals[1], $block_until=network_time()+block_interval, $current_interval=0, $current_block_id=ret);
-        if ( location != "" )
-            bi$location = location;
-        blocks[a] = bi;
-        event NetControl::catch_release_block_new(a, bi);
-        blocks[a] = bi;
-        log = populate_log_record(a, bi, DROP_REQUESTED);
-        Log::write(CATCH_RELEASE, log);
-        return bi;
-        }
-    Reporter::error(fmt("Catch and release could not add block for %s; failing.", a));
-    return BlockInfo($watch_until=double_to_time(0), $current_interval=0, $current_block_id="");
-@endif
-
-@if ( Cluster::is_enabled() && Cluster::local_node_type() != Cluster::MANAGER )
-    bi = BlockInfo($watch_until=network_time()+catch_release_intervals[1], $block_until=network_time()+block_interval, $current_interval=0, $current_block_id="");
-    event NetControl::catch_release_add(a, location);
-    return bi;
-@endif
-
+    if ( single_enforcement_point )
+        {
+        local ret = drop_address(a, block_interval, location);
+
+        if ( ret != "" )
+            {
+            bi = BlockInfo($watch_until=network_time()+catch_release_intervals[1], $block_until=network_time()+block_interval, $current_interval=0, $current_block_id=ret);
+            if ( location != "" )
+                bi$location = location;
+            blocks[a] = bi;
+            event NetControl::catch_release_block_new(a, bi);
+            blocks[a] = bi;
+            log = populate_log_record(a, bi, DROP_REQUESTED);
+            Log::write(CATCH_RELEASE, log);
+            return bi;
+            }
+        Reporter::error(fmt("Catch and release could not add block for %s; failing.", a));
+        return BlockInfo($watch_until=double_to_time(0), $current_interval=0, $current_block_id="");
+        }
+
+    if ( is_not_mgr )
+        {
+        bi = BlockInfo($watch_until=network_time()+catch_release_intervals[1], $block_until=network_time()+block_interval, $current_interval=0, $current_block_id="");
+        event NetControl::catch_release_add(a, location);
+        return bi;
+        }
     }
 
 function unblock_address_catch_release(a: addr, reason: string &default=""): boo
@@ -407,22 +415,22 @@ function unblock_address_catch_release(a: addr, reason: string &default=""): boo
     if ( a !in blocks )
         return F;
 
-@if ( ! Cluster::is_enabled() || ( Cluster::is_enabled() && Cluster::local_node_type() == Cluster::MANAGER ) )
-    local bi = blocks[a];
-    local log = populate_log_record(a, bi, UNBLOCK);
-    if ( reason != "" )
-        log$message = reason;
-    Log::write(CATCH_RELEASE, log);
-    delete blocks[a];
-    if ( bi?$block_until && bi$block_until > network_time() && bi$current_block_id != "" )
-        remove_rule(bi$current_block_id, reason);
-@endif
-@if ( Cluster::is_enabled() && Cluster::local_node_type() == Cluster::MANAGER )
-    event NetControl::catch_release_block_delete(a);
-@endif
-@if ( Cluster::is_enabled() && Cluster::local_node_type() != Cluster::MANAGER )
-    event NetControl::catch_release_delete(a, reason);
-@endif
+    if ( single_enforcement_point )
+        {
+        local bi = blocks[a];
+        local log = populate_log_record(a, bi, UNBLOCK);
+        if ( reason != "" )
+            log$message = reason;
+        Log::write(CATCH_RELEASE, log);
+        delete blocks[a];
+        if ( bi?$block_until && bi$block_until > network_time() && bi$current_block_id != "" )
+            remove_rule(bi$current_block_id, reason);
+        }
+
+    if ( is_mgr )
+        event NetControl::catch_release_block_delete(a);
+    else if ( is_not_mgr )
+        event NetControl::catch_release_delete(a, reason);
 
     return T;
     }
@@ -431,66 +439,66 @@ function catch_release_seen(a: addr)
     {
     if ( a in blocks )
         {
-@if ( ! Cluster::is_enabled() || ( Cluster::is_enabled() && Cluster::local_node_type() == Cluster::MANAGER ) )
         local bi = blocks[a];
-        local log: CatchReleaseInfo;
-        local e = Entity($ty=ADDRESS, $ip=addr_to_subnet(a));
-
-        if ( [e,DROP] in rule_entities )
+        if ( single_enforcement_point )
             {
-            if ( catch_release_warn_blocked_ip_encountered == F )
-                return;
+            local log: CatchReleaseInfo;
+            local e = Entity($ty=ADDRESS, $ip=addr_to_subnet(a));
 
-            # This should be blocked - block has not been applied yet by hardware? Ignore for the moment...
-            log = populate_log_record(a, bi, INFO);
-            log$action = INFO;
-            log$message = "Block seen while in rule_entities. No action taken.";
-            Log::write(CATCH_RELEASE, log);
-            return;
-            }
+            if ( [e,DROP] in rule_entities )
+                {
+                if ( catch_release_warn_blocked_ip_encountered == F )
+                    return;
+
+                # This should be blocked - block has not been applied yet by hardware? Ignore for the moment...
+                log = populate_log_record(a, bi, INFO);
+                log$action = INFO;
+                log$message = "Block seen while in rule_entities. No action taken.";
+                Log::write(CATCH_RELEASE, log);
+                return;
+                }
 
-    # ok, this one returned again while still in the backoff period.
+            # ok, this one returned again while still in the backoff period.
 
-    local try = bi$current_interval;
-    if ( (try+1) in catch_release_intervals )
-        ++try;
+            local try = bi$current_interval;
+            if ( (try+1) in catch_release_intervals )
+                ++try;
 
-    bi$current_interval = try;
-    if ( (try+1) in catch_release_intervals )
-        bi$watch_until = network_time() + catch_release_intervals[try+1];
-    else
-        bi$watch_until = network_time() + catch_release_intervals[try];
+            bi$current_interval = try;
+            if ( (try+1) in catch_release_intervals )
+                bi$watch_until = network_time() + catch_release_intervals[try+1];
+            else
+                bi$watch_until = network_time() + catch_release_intervals[try];
 
-    bi$block_until = network_time() + catch_release_intervals[try];
-    ++bi$num_reblocked;
+            bi$block_until = network_time() + catch_release_intervals[try];
+            ++bi$num_reblocked;
 
-    local block_interval = catch_release_intervals[try];
-    local location = "";
-    if ( bi?$location )
-        location = bi$location;
-    local drop = drop_address(a, block_interval, fmt("Re-drop by catch-and-release: %s", location));
-    bi$current_block_id = drop;
+            local block_interval = catch_release_intervals[try];
+            local location = "";
+            if ( bi?$location )
+                location = bi$location;
+            local drop = drop_address(a, block_interval, fmt("Re-drop by catch-and-release: %s", location));
+            bi$current_block_id = drop;
 
-    blocks[a] = bi;
+            blocks[a] = bi;
 
-    log = populate_log_record(a, bi, SEEN_AGAIN);
-    Log::write(CATCH_RELEASE, log);
-@endif
-@if ( Cluster::is_enabled() && Cluster::local_node_type() == Cluster::MANAGER )
-    event NetControl::catch_release_block_new(a, bi);
-@endif
-@if ( Cluster::is_enabled() && Cluster::local_node_type() != Cluster::MANAGER )
-    if ( a in catch_release_recently_notified )
-        return;
+            log = populate_log_record(a, bi, SEEN_AGAIN);
+            Log::write(CATCH_RELEASE, log);
 
-    event NetControl::catch_release_encountered(a);
-    add catch_release_recently_notified[a];
-@endif
+            if ( is_mgr )
+                event NetControl::catch_release_block_new(a, bi);
+            }
+        else if ( is_not_mgr )
+            {
+            if ( a in catch_release_recently_notified )
+                return;
+
+            event NetControl::catch_release_encountered(a);
+            add catch_release_recently_notified[a];
+            }
 
         return;
         }
 
 event new_connection(c: connection)
@@ -26,7 +26,7 @@ export {
     };
 }
 
-@if ( Cluster::is_enabled() )
+@if ( Cluster::is_enabled() ) &analyze
 
 event zeek_init() &priority=5
     {
@@ -77,7 +77,7 @@ function observe_weird_stats()
                           SumStats::Observation($dbl=(v + 0.0)));
     }
 
-@if ( Cluster::is_enabled() )
+@if ( Cluster::is_enabled() ) &analyze
 
 # I'm not sure if this is a hack or not: the manager will generate this
 # event at the end of its epoch so workers can handle it just in time to
@@ -96,6 +96,10 @@ event Known::host_found(info: HostsInfo)
         }
     }
 
+const should_log_host =
+    ! Cluster::is_enabled() ||
+    Cluster::local_node_type() == Cluster::PROXY;
+
 event known_host_add(info: HostsInfo)
     {
     if ( use_host_store )
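`known_host_add` itself now runs on every node type; only the `Log::write` is gated by the new constant. On a cluster the write happens on proxies, and since Zeek's logging framework forwards writes to the logger node, restricting the write (rather than the whole handler) is enough to keep each host from being logged more than once. The matching run-time check is in the next hunk.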
@@ -106,10 +110,8 @@ event known_host_add(info: HostsInfo)
 
     add Known::hosts[info$host];
 
-@if ( ! Cluster::is_enabled() ||
-      Cluster::local_node_type() == Cluster::PROXY )
+    if ( should_log_host )
         Log::write(Known::HOSTS_LOG, info);
-@endif
     }
 
 event Cluster::node_up(name: string, id: string)
@@ -144,6 +144,10 @@ event service_info_commit(info: ServicesInfo)
         }
     }
 
+const should_log_service =
+    ! Cluster::is_enabled() ||
+    Cluster::local_node_type() == Cluster::PROXY;
+
 event known_service_add(info: ServicesInfo)
     {
     if ( Known::use_service_store )
@@ -172,10 +176,8 @@ event known_service_add(info: ServicesInfo)
         }
     }
 
-@if ( ! Cluster::is_enabled() ||
-      Cluster::local_node_type() == Cluster::PROXY )
+    if ( should_log_service )
         Log::write(Known::SERVICES_LOG, info_to_log);
-@endif
     }
 
 event Cluster::node_up(name: string, id: string)
@@ -41,16 +41,15 @@ export {
     global add_secret: event(client_random: string, secrets: string);
 }
 
-@if ( keylog_file == "" )
-# If a keylog file was given via an environment variable, let's disable secret expiration - that does not
-# make sense for pcaps.
 global secrets: table[string] of string = {} &redef;
 global keys: table[string] of string = {} &redef;
-@else
-global secrets: table[string] of string = {} &read_expire=secret_expiration &redef;
-global keys: table[string] of string = {} &read_expire=secret_expiration &redef;
-@endif
+
+@if ( keylog_file != "" ) &analyze
+# If a keylog file was given directly (not via an environment variable),
+# set up secret expiration (which doesn't make sense for PCAPs).
+redef secrets &read_expire=secret_expiration;
+redef keys &read_expire=secret_expiration;
+@endif
 
 redef record SSL::Info += {
     # Decryption uses client_random as identifier
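Instead of declaring `secrets` and `keys` twice under `@if`/`@else`, they are declared once and the expiration attribute is bolted on afterwards; `redef` can attach attributes to an existing global. A self-contained sketch of that mechanism (table and switch are illustrative):

    global cache: table[string] of count = {} &redef;

    # Conditionally add expiration without redeclaring the table:
    @if ( getenv("CACHE_EXPIRE") != "" ) &analyze
    redef cache &read_expire=10 min;
    @endif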
@@ -108,6 +108,10 @@ event Known::cert_found(info: CertsInfo, hash: string)
         }
     }
 
+const should_log_cert =
+    ! Cluster::is_enabled() ||
+    Cluster::local_node_type() == Cluster::PROXY;
+
 event known_cert_add(info: CertsInfo, hash: string)
     {
     if ( Known::use_cert_store )
@@ -118,10 +122,8 @@ event known_cert_add(info: CertsInfo, hash: string)
 
     add Known::certs[info$host, hash];
 
-@if ( ! Cluster::is_enabled() ||
-      Cluster::local_node_type() == Cluster::PROXY )
+    if ( should_log_cert )
         Log::write(Known::CERTS_LOG, info);
-@endif
     }
 
 event Known::cert_found(info: CertsInfo, hash: string)
@@ -61,7 +61,9 @@ export {
 
 global intermediate_cache: table[string] of vector of opaque of x509;
 
-@if ( Cluster::is_enabled() )
+const have_cluster = Cluster::is_enabled();
+
+@if ( have_cluster ) &analyze
 event zeek_init()
     {
     Broker::auto_publish(Cluster::worker_topic, SSL::intermediate_add);
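`have_cluster` shows that one constant can drive both kinds of conditional: the parse-time `@if ( have_cluster ) &analyze` here and the run-time `if ( have_cluster )` inside `add_to_cache` in the next hunk, keeping the two checks textually in sync. Sketch:

    const have_cluster = Cluster::is_enabled();

    # Parse-time use:
    @if ( have_cluster ) &analyze
    event zeek_init()
        {
        # Run-time use of the same switch:
        if ( have_cluster )
            print "cluster mode";
        }
    @endif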
@@ -72,19 +74,18 @@ event zeek_init()
 function add_to_cache(key: string, value: vector of opaque of x509)
     {
     intermediate_cache[key] = value;
-@if ( Cluster::is_enabled() )
-    event SSL::new_intermediate(key, value);
-@endif
+    if ( have_cluster )
+        event SSL::new_intermediate(key, value);
     }
 
-@if ( Cluster::is_enabled() && Cluster::local_node_type() != Cluster::MANAGER )
+@if ( have_cluster && Cluster::local_node_type() != Cluster::MANAGER ) &analyze
 event SSL::intermediate_add(key: string, value: vector of opaque of x509)
     {
     intermediate_cache[key] = value;
     }
 @endif
 
-@if ( Cluster::is_enabled() && Cluster::local_node_type() == Cluster::MANAGER )
+@if ( have_cluster && Cluster::local_node_type() == Cluster::MANAGER ) &analyze
 event SSL::new_intermediate(key: string, value: vector of opaque of x509)
     {
     if ( key in intermediate_cache )
@@ -1,9 +1,6 @@
 ### BTest baseline data generated by btest-diff. Do not edit. Use "btest -U/-u" to update. Requires BTest >= 0.63.
 ### NOTE: This file has been sorted with diff-sort.
 warning in <...>/check-unused-event-handlers.test, line 7: handler for non-existing event cannot be invoked (this_is_never_used)
-warning in <params>, line 1: event handler never invoked: Cluster::hello
-warning in <params>, line 1: event handler never invoked: Cluster::node_down
-warning in <params>, line 1: event handler never invoked: Cluster::node_up
 warning in <params>, line 1: event handler never invoked: Control::configuration_update
 warning in <params>, line 1: event handler never invoked: Control::configuration_update_request
 warning in <params>, line 1: event handler never invoked: Control::configuration_update_response
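The three `Cluster::*` warnings drop out of this baseline, presumably because the cluster framework's `@if ( Cluster::is_enabled() ) &analyze` now makes the sites that generate `Cluster::hello`, `Cluster::node_up`, and `Cluster::node_down` visible to usage analysis even in a standalone run, so those handlers no longer look never-invoked. The `Control::*` handlers are untouched by this commit, and their warnings remain.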