Large update for the SumStats framework.

 - On-demand access to sumstats results through "return from"
   functions named SumStats::request and SumStats::request_key
   (see the sketches after this list).  Both functions are tested
   in standalone and clustered modes.

 - The $name field has returned to SumStats, which simplifies the
   cluster code and makes the on-demand access possible (illustrated
   in the sketches below).

 - Clustered results can now only be collected for 1 minute from
   their time of creation instead of from the time of last read.

 - Thresholds now use doubles instead of counts everywhere.

 - Calculation dependency resolution now occurs at startup time
   instead of at observation time, which provides a minor CPU
   performance improvement.  A new plugin registration mechanism
   was created to support this change.

 - AppStats now has a minimal doc string and is broken into hook-based
   plugins.

 - AppStats and traceroute detection added to local.bro
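
   A minimal, hypothetical sketch of what a sumstat definition looks
   like with these changes.  The stream name "conn.attempted", the
   sumstat name "conn.originators", and the 25.0 threshold are invented
   for illustration; note the new $name field and the double-valued
   threshold.

       event bro_init()
           {
           # One reducer that sums observations on a hypothetical stream.
           local r1 = SumStats::Reducer($stream="conn.attempted",
                                        $apply=set(SumStats::SUM));

           SumStats::create([$name="conn.originators",
                             $epoch=5min,
                             $reducers=set(r1),
                             # Thresholds are doubles now, not counts.
                             $threshold=25.0,
                             $threshold_val(key: SumStats::Key, result: SumStats::Result) =
                                 {
                                 return result["conn.attempted"]$sum;
                                 },
                             $threshold_crossed(key: SumStats::Key, result: SumStats::Result) =
                                 {
                                 print fmt("%s crossed the example threshold", key$host);
                                 }]);
           }

       event connection_attempt(c: connection)
           {
           # Feed the reducer one observation per attempted connection.
           SumStats::observe("conn.attempted", [$host=c$id$orig_h], [$num=1]);
           }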
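
   And a rough sketch of the on-demand access, reusing the hypothetical
   names from the definition above.  SumStats::request_key is a "return
   from" function, so callers invoke it inside a when statement;
   SumStats::request works the same way but returns the full ResultTable.

       # Illustrative helper; call it from wherever a lookup is needed.
       function check_originator(host: addr)
           {
           when ( local result = SumStats::request_key("conn.originators",
                                                       [$host=host]) )
               {
               if ( "conn.attempted" in result )
                   print fmt("%s has attempted %.0f connections so far",
                             host, result["conn.attempted"]$sum);
               else
                   print fmt("no observations for %s yet", host);
               }
           timeout 2min
               {
               # The framework itself gives up after a bit over a minute and
               # returns an empty table; this is just a caller-side backstop.
               print fmt("SumStats request for %s timed out", host);
               }
           }
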
Seth Hall 2013-05-21 15:52:59 -04:00
parent 7d7d30e1f7
commit bec965b66f
34 changed files with 687 additions and 277 deletions


@@ -27,39 +27,34 @@ export {
## performed. In practice this should hopefully have a minimal effect.
const max_outstanding_global_views = 10 &redef;
## Intermediate updates can cause overload situations on very large clusters. This
## option may help reduce load and correct intermittent problems. The goal for this
## option is also meant to be temporary.
const enable_intermediate_updates = T &redef;
## Event sent by the manager in a cluster to initiate the collection of values for
## a sumstat.
global cluster_ss_request: event(uid: string, ssid: string);
global cluster_ss_request: event(uid: string, ss_name: string, cleanup: bool);
## Event sent by nodes that are collecting sumstats after receiving a request for
## the sumstat from the manager.
global cluster_ss_response: event(uid: string, ssid: string, data: ResultTable, done: bool);
global cluster_ss_response: event(uid: string, ss_name: string, data: ResultTable, done: bool);
## This event is sent by the manager in a cluster to initiate the collection of
## a single key value from a sumstat. It's typically used to get intermediate
## updates before the break interval triggers to speed detection of a value
## crossing a threshold.
global cluster_key_request: event(uid: string, ssid: string, key: Key);
global cluster_key_request: event(uid: string, ss_name: string, key: Key, cleanup: bool);
## This event is sent by nodes in response to a
## :bro:id:`SumStats::cluster_key_request` event.
global cluster_key_response: event(uid: string, ssid: string, key: Key, result: Result);
global cluster_key_response: event(uid: string, ss_name: string, key: Key, result: Result, cleanup: bool);
## This is sent by workers to indicate that they crossed the percent
## of the current threshold by the percentage defined globally in
## :bro:id:`SumStats::cluster_request_global_view_percent`
global cluster_key_intermediate_response: event(ssid: string, key: SumStats::Key);
global cluster_key_intermediate_response: event(ss_name: string, key: SumStats::Key);
## This event is scheduled internally on workers to send result chunks.
global send_data: event(uid: string, ssid: string, data: ResultTable);
global send_data: event(uid: string, ss_name: string, data: ResultTable, cleanup: bool);
## This event is generated when a threshold is crossed.
global cluster_threshold_crossed: event(ssid: string, key: SumStats::Key, thold: Thresholding);
global cluster_threshold_crossed: event(ss_name: string, key: SumStats::Key, thold: Thresholding);
}
# Add events to the cluster framework to make this work.
@@ -74,44 +69,38 @@ redef Cluster::worker2manager_events += /SumStats::cluster_(ss_response|key_resp
# an intermediate result has been received.
global recent_global_view_keys: table[string, Key] of count &create_expire=1min &default=0;
event bro_init() &priority=-100
{
# The manager is the only host allowed to track these.
stats_store = table();
reducer_store = table();
}
# This is done on all non-manager node types in the event that a sumstat is
# being collected somewhere other than a worker.
function data_added(ss: SumStat, key: Key, result: Result)
{
# If an intermediate update for this value was sent recently, don't send
# it again.
if ( [ss$id, key] in recent_global_view_keys )
if ( [ss$name, key] in recent_global_view_keys )
return;
# If val is 5 and global view % is 0.1 (10%), pct_val will be 50. If that
# crosses the full threshold then it's a candidate to send as an
# intermediate update.
if ( enable_intermediate_updates &&
check_thresholds(ss, key, result, cluster_request_global_view_percent) )
if ( check_thresholds(ss, key, result, cluster_request_global_view_percent) )
{
# kick off intermediate update
event SumStats::cluster_key_intermediate_response(ss$id, key);
++recent_global_view_keys[ss$id, key];
event SumStats::cluster_key_intermediate_response(ss$name, key);
++recent_global_view_keys[ss$name, key];
}
}
event SumStats::send_data(uid: string, ssid: string, data: ResultTable)
event SumStats::send_data(uid: string, ss_name: string, data: ResultTable, cleanup: bool)
{
#print fmt("WORKER %s: sending data for uid %s...", Cluster::node, uid);
local local_data: ResultTable = table();
local incoming_data: ResultTable = cleanup ? data : copy(data);
local num_added = 0;
for ( key in data )
for ( key in incoming_data )
{
local_data[key] = data[key];
delete data[key];
local_data[key] = incoming_data[key];
delete incoming_data[key];
# Only send cluster_send_in_groups_of at a time. Queue another
# event to send the next group.
@@ -121,56 +110,56 @@ event SumStats::send_data(uid: string, ssid: string, data: ResultTable)
local done = F;
# If data is empty, this sumstat is done.
if ( |data| == 0 )
if ( |incoming_data| == 0 )
done = T;
event SumStats::cluster_ss_response(uid, ssid, local_data, done);
event SumStats::cluster_ss_response(uid, ss_name, local_data, done);
if ( ! done )
schedule 0.01 sec { SumStats::send_data(uid, ssid, data) };
schedule 0.01 sec { SumStats::send_data(uid, ss_name, incoming_data, T) };
}
event SumStats::cluster_ss_request(uid: string, ssid: string)
event SumStats::cluster_ss_request(uid: string, ss_name: string, cleanup: bool)
{
#print fmt("WORKER %s: received the cluster_ss_request event for %s.", Cluster::node, id);
# Initiate sending all of the data for the requested stats.
if ( ssid in result_store )
event SumStats::send_data(uid, ssid, result_store[ssid]);
if ( ss_name in result_store )
event SumStats::send_data(uid, ss_name, result_store[ss_name], cleanup);
else
event SumStats::send_data(uid, ssid, table());
event SumStats::send_data(uid, ss_name, table(), cleanup);
# Lookup the actual sumstats and reset it, the reference to the data
# currently stored will be maintained internally by the send_data event.
if ( ssid in stats_store )
reset(stats_store[ssid]);
if ( ss_name in stats_store && cleanup )
reset(stats_store[ss_name]);
}
event SumStats::cluster_key_request(uid: string, ssid: string, key: Key)
event SumStats::cluster_key_request(uid: string, ss_name: string, key: Key, cleanup: bool)
{
if ( ssid in result_store && key in result_store[ssid] )
if ( ss_name in result_store && key in result_store[ss_name] )
{
#print fmt("WORKER %s: received the cluster_key_request event for %s=%s.", Cluster::node, key2str(key), data);
event SumStats::cluster_key_response(uid, ssid, key, result_store[ssid][key]);
event SumStats::cluster_key_response(uid, ss_name, key, result_store[ss_name][key], cleanup);
}
else
{
# We need to send an empty response if we don't have the data so that the manager
# can know that it heard back from all of the workers.
event SumStats::cluster_key_response(uid, ssid, key, table());
event SumStats::cluster_key_response(uid, ss_name, key, table(), cleanup);
}
}
event SumStats::cluster_threshold_crossed(ssid: string, key: SumStats::Key, thold: Thresholding)
event SumStats::cluster_threshold_crossed(ss_name: string, key: SumStats::Key, thold: Thresholding)
{
if ( ssid !in threshold_tracker )
threshold_tracker[ssid] = table();
if ( ss_name !in threshold_tracker )
threshold_tracker[ss_name] = table();
threshold_tracker[ssid][key] = thold;
threshold_tracker[ss_name][key] = thold;
}
event SumStats::thresholds_reset(ssid: string)
event SumStats::thresholds_reset(ss_name: string)
{
threshold_tracker[ssid] = table();
threshold_tracker[ss_name] = table();
}
@endif
@@ -181,7 +170,7 @@ event SumStats::thresholds_reset(ssid: string)
# This variable is maintained by manager nodes as they collect and aggregate
# results.
# Index on a uid.
global stats_results: table[string] of ResultTable &read_expire=1min;
global stats_results: table[string] of ResultTable &create_expire=1min &default=table();
# This variable is maintained by manager nodes to track how many "dones" they
# collected per collection unique id. Once the number of results for a uid
@@ -189,18 +178,18 @@ global stats_results: table[string] of ResultTable &read_expire=1min;
# result is written out and deleted from here.
# Indexed on a uid.
# TODO: add an &expire_func in case not all results are received.
global done_with: table[string] of count &read_expire=1min &default=0;
global done_with: table[string] of count &create_expire=1min &default=0;
# This variable is maintained by managers to track intermediate responses as
# they are getting a global view for a certain key.
# Indexed on a uid.
global key_requests: table[string] of Result &read_expire=1min;
global key_requests: table[string] of Result &create_expire=1min;
# This variable is maintained by managers to prevent overwhelming communication due
# to too many intermediate updates. Each sumstat is tracked separately so that
# one won't overwhelm and degrade other quieter sumstats.
# Indexed on a sumstat id.
global outstanding_global_views: table[string] of count &default=0;
global outstanding_global_views: table[string] of count &create_expire=1min &default=0;
const zero_time = double_to_time(0.0);
# Managers handle logging.
@@ -216,7 +205,7 @@ event SumStats::finish_epoch(ss: SumStat)
stats_results[uid] = table();
# Request data from peers.
event SumStats::cluster_ss_request(uid, ss$id);
event SumStats::cluster_ss_request(uid, ss$name, T);
}
# Schedule the next finish_epoch event.
@@ -230,20 +219,20 @@ function data_added(ss: SumStat, key: Key, result: Result)
if ( check_thresholds(ss, key, result, 1.0) )
{
threshold_crossed(ss, key, result);
event SumStats::cluster_threshold_crossed(ss$id, key, threshold_tracker[ss$id][key]);
event SumStats::cluster_threshold_crossed(ss$name, key, threshold_tracker[ss$name][key]);
}
}
event SumStats::cluster_key_response(uid: string, ssid: string, key: Key, result: Result)
event SumStats::cluster_key_response(uid: string, ss_name: string, key: Key, result: Result, cleanup: bool)
{
#print fmt("%0.6f MANAGER: receiving key data from %s - %s=%s", network_time(), get_event_peer()$descr, key2str(key), result);
# We only want to try and do a value merge if there are actually measured datapoints
# in the Result.
if ( uid in key_requests )
key_requests[uid] = compose_results(key_requests[uid], result);
else
if ( uid !in key_requests || |key_requests[uid]| == 0 )
key_requests[uid] = result;
else
key_requests[uid] = compose_results(key_requests[uid], result);
# Mark that a worker is done.
++done_with[uid];
@@ -251,30 +240,39 @@ event SumStats::cluster_key_response(uid: string, ssid: string, key: Key, result
#print fmt("worker_count:%d :: done_with:%d", Cluster::worker_count, done_with[uid]);
if ( Cluster::worker_count == done_with[uid] )
{
local ss = stats_store[ssid];
local ss = stats_store[ss_name];
local ir = key_requests[uid];
if ( check_thresholds(ss, key, ir, 1.0) )
{
threshold_crossed(ss, key, ir);
event SumStats::cluster_threshold_crossed(ss$id, key, threshold_tracker[ss$id][key]);
event SumStats::cluster_threshold_crossed(ss$name, key, threshold_tracker[ss$name][key]);
}
delete done_with[uid];
delete key_requests[uid];
# Check that there is an outstanding view before subtracting.
if ( outstanding_global_views[ssid] > 0 )
--outstanding_global_views[ssid];
if ( cleanup )
{
# We only want to delete the data if this is a non dynamic
# request because the dynamic requests use when statements
# and the data needs to remain available.
delete key_requests[uid];
delete done_with[uid];
# Check that there is an outstanding view before subtracting.
# Global views only apply to non-dynamic requests. Dynamic
# requests must be serviced.
if ( outstanding_global_views[ss_name] > 0 )
--outstanding_global_views[ss_name];
}
}
}
# Managers handle intermediate updates here.
event SumStats::cluster_key_intermediate_response(ssid: string, key: Key)
event SumStats::cluster_key_intermediate_response(ss_name: string, key: Key)
{
#print fmt("MANAGER: receiving intermediate key data from %s", get_event_peer()$descr);
#print fmt("MANAGER: requesting key data for %s", key2str(key));
if ( ssid in outstanding_global_views &&
|outstanding_global_views[ssid]| > max_outstanding_global_views )
if ( ss_name in outstanding_global_views &&
|outstanding_global_views[ss_name]| > max_outstanding_global_views )
{
# Don't do this intermediate update. Perhaps at some point in the future
# we will queue and randomly select from these ignored intermediate
@@ -282,13 +280,14 @@ event SumStats::cluster_key_intermediate_response(ssid: string, key: Key)
return;
}
++outstanding_global_views[ssid];
++outstanding_global_views[ss_name];
local uid = unique_id("");
event SumStats::cluster_key_request(uid, ssid, key);
done_with[uid] = 0;
event SumStats::cluster_key_request(uid, ss_name, key, T);
}
event SumStats::cluster_ss_response(uid: string, ssid: string, data: ResultTable, done: bool)
event SumStats::cluster_ss_response(uid: string, ss_name: string, data: ResultTable, done: bool)
{
#print fmt("MANAGER: receiving results from %s", get_event_peer()$descr);
@@ -297,7 +296,7 @@ event SumStats::cluster_ss_response(uid: string, ssid: string, data: ResultTable
++done_with[uid];
local local_data = stats_results[uid];
local ss = stats_store[ssid];
local ss = stats_store[ss_name];
for ( key in data )
{
@@ -314,13 +313,14 @@ event SumStats::cluster_ss_response(uid: string, ssid: string, data: ResultTable
if ( check_thresholds(ss, key, local_data[key], 1.0) )
{
threshold_crossed(ss, key, local_data[key]);
event SumStats::cluster_threshold_crossed(ss$id, key, threshold_tracker[ss$id][key]);
event SumStats::cluster_threshold_crossed(ss$name, key, threshold_tracker[ss$name][key]);
}
}
}
# If the data has been collected from all peers, we are done and ready to finish.
if ( Cluster::worker_count == done_with[uid] )
if ( Cluster::worker_count == done_with[uid] &&
/^dyn-/ !in uid )
{
if ( ss?$epoch_finished )
ss$epoch_finished(local_data);
@@ -328,14 +328,60 @@ event SumStats::cluster_ss_response(uid: string, ssid: string, data: ResultTable
# Clean up
delete stats_results[uid];
delete done_with[uid];
# Not sure I need to reset the sumstat on the manager.
reset(ss);
}
}
event remote_connection_handshake_done(p: event_peer) &priority=5
function request(ss_name: string): ResultTable
{
send_id(p, "SumStats::stats_store");
send_id(p, "SumStats::reducer_store");
# This only needs to be implemented this way for cluster compatibility.
local uid = unique_id("dyn-");
stats_results[uid] = table();
done_with[uid] = 0;
event SumStats::cluster_ss_request(uid, ss_name, F);
return when ( uid in done_with && Cluster::worker_count == done_with[uid] )
{
if ( uid in stats_results )
{
local ss_result = stats_results[uid];
# Clean up
delete stats_results[uid];
delete done_with[uid];
reset(stats_store[ss_name]);
return ss_result;
}
else
return table();
}
timeout 1.1min
{
Reporter::warning(fmt("Dynamic SumStat request for %s took longer than 1 minute and was automatically cancelled.", ss_name));
return table();
}
}
function request_key(ss_name: string, key: Key): Result
{
local uid = unique_id("dyn-");
done_with[uid] = 0;
key_requests[uid] = table();
event SumStats::cluster_key_request(uid, ss_name, key, F);
return when ( uid in done_with && Cluster::worker_count == done_with[uid] )
{
local result = key_requests[uid];
# Clean up
delete key_requests[uid];
delete done_with[uid];
return result;
}
timeout 1.1min
{
Reporter::warning(fmt("Dynamic SumStat key request for %s (%s) took longer than 1 minute and was automatically cancelled.", ss_name, key));
return table();
}
}
@endif