Checkpoint, don't try running this. It's broken all over the place.

Seth Hall 2013-03-19 11:39:58 -04:00
parent 8778761c07
commit 6dc204b385
14 changed files with 352 additions and 379 deletions


@@ -17,8 +17,8 @@ export {
## The percent of the full threshold value that needs to be met
## on a single worker for that worker to send the value to its manager in
## order for it to request a global view for that value. There is no
## requirement that the manager requests a global view for the index
## since it may opt not to if it requested a global view for the index
## requirement that the manager requests a global view for the key
## since it may opt not to if it requested a global view for the key
## recently.
const cluster_request_global_view_percent = 0.2 &redef;
@@ -34,75 +34,74 @@ export {
const enable_intermediate_updates = T &redef;
# Event sent by the manager in a cluster to initiate the
# collection of metrics values for a filter.
global cluster_filter_request: event(uid: string, id: string, filter_name: string);
# collection of metrics values for a measurement.
global cluster_measurement_request: event(uid: string, mid: string);
# Event sent by nodes that are collecting metrics after receiving
# a request for the metric filter from the manager.
global cluster_filter_response: event(uid: string, id: string, filter_name: string, data: MetricTable, done: bool);
# a request for the metric measurement from the manager.
global cluster_measurement_response: event(uid: string, mid: string, data: ResultTable, done: bool);
# This event is sent by the manager in a cluster to initiate the
# collection of a single index value from a filter. It's typically
# collection of a single key value from a measurement. It's typically
# used to get intermediate updates before the break interval triggers
# to speed detection of a value crossing a threshold.
global cluster_index_request: event(uid: string, id: string, filter_name: string, index: Index);
global cluster_key_request: event(uid: string, mid: string, key: Key);
# This event is sent by nodes in response to a
# :bro:id:`Measurement::cluster_index_request` event.
global cluster_index_response: event(uid: string, id: string, filter_name: string, index: Index, val: ResultVal);
# :bro:id:`Measurement::cluster_key_request` event.
global cluster_key_response: event(uid: string, mid: string, key: Key, result: ResultTable);
# This is sent by workers to indicate that they have crossed the fraction of the
# current threshold defined globally in
# :bro:id:`Measurement::cluster_request_global_view_percent`
global cluster_index_intermediate_response: event(id: string, filter_name: string, index: Measurement::Index);
global cluster_key_intermediate_response: event(mid: string, key: Measurement::Key);
# This event is scheduled internally on workers to send result chunks.
global send_data: event(uid: string, id: string, filter_name: string, data: MetricTable);
global send_data: event(uid: string, id: string, measurement_name: string, data: ResultTable);
}
# Add events to the cluster framework to make this work.
redef Cluster::manager2worker_events += /Measurement::cluster_(filter_request|index_request)/;
redef Cluster::worker2manager_events += /Measurement::cluster_(filter_response|index_response|index_intermediate_response)/;
redef Cluster::manager2worker_events += /Measurement::cluster_(measurement_request|key_request)/;
redef Cluster::worker2manager_events += /Measurement::cluster_(measurement_response|key_response|key_intermediate_response)/;
@if ( Cluster::local_node_type() != Cluster::MANAGER )
# This variable is maintained to know what indexes they have recently sent as
# This variable is maintained to know what keys they have recently sent as
# intermediate updates so they don't overwhelm their manager. The count that is
# yielded is the number of times the percentage threshold has been crossed and
# an intermediate result has been received.
global recent_global_view_indexes: table[string, string, Index] of count &create_expire=1min &default=0;
global recent_global_view_keys: table[string, string, Key] of count &create_expire=1min &default=0;
# This is done on all non-manager node types in the event that a metric is
# being collected somewhere other than a worker.
function data_added(filter: Filter, index: Index, val: ResultVal)
function data_added(measurement: Filter, key: Key, val: Result)
{
# If an intermediate update for this value was sent recently, don't send
# it again.
if ( [filter$id, filter$name, index] in recent_global_view_indexes )
if ( [measurement$id, measurement$name, key] in recent_global_view_keys )
return;
# If val is 5 and global view % is 0.1 (10%), pct_val will be 50. If that
# crosses the full threshold then it's a candidate to send as an
# intermediate update.
if ( enable_intermediate_updates &&
check_thresholds(filter, index, val, cluster_request_global_view_percent) )
check_thresholds(measurement, key, val, cluster_request_global_view_percent) )
{
# kick off intermediate update
event Measurement::cluster_index_intermediate_response(filter$id, filter$name, index);
++recent_global_view_indexes[filter$id, filter$name, index];
event Measurement::cluster_key_intermediate_response(measurement$id, measurement$name, key);
++recent_global_view_keys[measurement$id, measurement$name, key];
}
}
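
check_thresholds() is defined elsewhere in the framework and is not part of this diff; its final argument is a percentage modifier, and the comment above describes the scaling it is assumed to apply. As a standalone sketch of just that arithmetic (the function name is illustrative only, not framework API):

function scaled_for_comparison(observed: double, modify_pct: double): double
	{
	# E.g. observed=5.0 with modify_pct=0.1 yields 50.0, which is then
	# compared against the full configured threshold.
	return observed / modify_pct;
	}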
event Measurement::send_data(uid: string, id: string, filter_name: string, data: MetricTable)
event Measurement::send_data(uid: string, id: string, data: ResultTable)
{
#print fmt("WORKER %s: sending data for uid %s...", Cluster::node, uid);
local local_data: MetricTable;
local local_data: ResultTable;
local num_added = 0;
for ( index in data )
for ( key in data )
{
local_data[index] = data[index];
delete data[index];
local_data[key] = data[key];
delete data[key];
# Only send cluster_send_in_groups_of at a time. Queue another
# event to send the next group.
@@ -115,35 +114,35 @@ event Measurement::send_data(uid: string, id: string, filter_name: string, data:
if ( |data| == 0 )
done = T;
event Measurement::cluster_filter_response(uid, id, filter_name, local_data, done);
event Measurement::cluster_measurement_response(uid, local_data, done);
if ( ! done )
event Measurement::send_data(uid, id, filter_name, data);
event Measurement::send_data(uid, mid, data);
}
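
The handler above keeps re-raising send_data until the table is drained, shipping at most cluster_send_in_groups_of entries per round. The same idiom in isolation, outside this diff, with hypothetical names and a plain table standing in for ResultTable:

const chunk_size = 50 &redef;

global ship_chunked: event(uid: string, data: table[string] of count);

event ship_chunked(uid: string, data: table[string] of count)
	{
	local chunk: table[string] of count;
	local n = 0;
	for ( k in data )
		{
		# Move entries into the chunk; deleting while iterating mirrors
		# what send_data does above.
		chunk[k] = data[k];
		delete data[k];
		++n;
		if ( n == chunk_size )
			break;
		}

	# ... transmit `chunk` (tagged with the collection uid) here ...

	if ( |data| > 0 )
		event ship_chunked(uid, data);
	}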
event Measurement::cluster_filter_request(uid: string, id: string, filter_name: string)
event Measurement::cluster_measurement_request(uid: string, mid: string)
{
#print fmt("WORKER %s: received the cluster_filter_request event for %s.", Cluster::node, id);
#print fmt("WORKER %s: received the cluster_measurement_request event for %s.", Cluster::node, id);
# Initiate sending all of the data for the requested filter.
event Measurement::send_data(uid, id, filter_name, store[id, filter_name]);
# Initiate sending all of the data for the requested measurement.
event Measurement::send_data(uid, mid, result_store[mid]);
# Lookup the actual filter and reset it, the reference to the data
# Look up the actual measurement and reset it; the reference to the data
# currently stored will be maintained internally by the send_data event.
reset(filter_store[id, filter_name]);
reset(measurement_store[mid]);
}
event Measurement::cluster_index_request(uid: string, id: string, filter_name: string, index: Index)
event Measurement::cluster_key_request(uid: string, mid: string, key: Key)
{
if ( [id, filter_name] in store && index in store[id, filter_name] )
if ( [mid] in result_store && key in result_store[mid] )
{
#print fmt("WORKER %s: received the cluster_index_request event for %s=%s.", Cluster::node, index2str(index), data);
event Measurement::cluster_index_response(uid, id, filter_name, index, store[id, filter_name][index]);
#print fmt("WORKER %s: received the cluster_key_request event for %s=%s.", Cluster::node, key2str(key), data);
event Measurement::cluster_key_response(uid, mid, key, result_store[mid][key]);
}
else
{
# We need to send an empty response if we don't have the data so that the manager
# can know that it heard back from all of the workers.
event Measurement::cluster_index_response(uid, id, filter_name, index, [$begin=network_time(), $end=network_time()]);
event Measurement::cluster_key_response(uid, mid, key, [$begin=network_time(), $end=network_time()]);
}
}
@@ -153,12 +152,8 @@ event Measurement::cluster_index_request(uid: string, id: string, filter_name: s
@if ( Cluster::local_node_type() == Cluster::MANAGER )
# This variable is maintained by manager nodes as they collect and aggregate
# results.
global filter_results: table[string, string, string] of MetricTable &read_expire=1min;
# This is maintained by managers so they can know what data they requested and
# when they requested it.
global requested_results: table[string] of time = table() &create_expire=5mins;
# results. It's indexed on a uid.
global measurement_results: table[string] of ResultTable &read_expire=1min;
# This variable is maintained by manager nodes to track how many "dones" they
# collected per collection unique id. Once the number of results for a uid
@@ -168,50 +163,49 @@ global requested_results: table[string] of time = table() &create_expire=5mins;
global done_with: table[string] of count &read_expire=1min &default=0;
# This variable is maintained by managers to track intermediate responses as
# they are getting a global view for a certain index.
global index_requests: table[string, string, string, Index] of ResultVal &read_expire=1min;
# they are getting a global view for a certain key. Indexed on a uid.
global key_requests: table[string] of Result &read_expire=1min;
# This variable is maintained by managers to prevent overwhelming communication due
# to too many intermediate updates. Each metric filter is tracked separately so that
# one metric won't overwhelm and degrade other quieter metrics.
global outstanding_global_views: table[string, string] of count &default=0;
# to too many intermediate updates. Each measurement is tracked separately so that
# one metric won't overwhelm and degrade other quieter metrics. Indexed on a
# measurement id.
global outstanding_global_views: table[string] of count &default=0;
# Managers handle logging.
event Measurement::finish_period(filter: Filter)
event Measurement::finish_period(m: Measurement)
{
#print fmt("%.6f MANAGER: breaking %s filter for %s metric", network_time(), filter$name, filter$id);
#print fmt("%.6f MANAGER: breaking %s measurement for %s metric", network_time(), measurement$name, measurement$id);
local uid = unique_id("");
# Set some tracking variables.
requested_results[uid] = network_time();
if ( [uid, filter$id, filter$name] in filter_results )
delete filter_results[uid, filter$id, filter$name];
filter_results[uid, filter$id, filter$name] = table();
if ( uid in measurement_results )
delete measurement_results[uid];
measurement_results[uid] = table();
# Request data from peers.
event Measurement::cluster_filter_request(uid, filter$id, filter$name);
event Measurement::cluster_measurement_request(uid, m$id);
# Schedule the next finish_period event.
schedule filter$every { Measurement::finish_period(filter) };
schedule m$epoch { Measurement::finish_period(m) };
}
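
finish_period() drives each collection cycle and re-arms itself with schedule, using the measurement's epoch as the interval. The recurring-timer idiom on its own, outside this diff (the event name is illustrative):

global epoch_tick: event(len: interval);

event epoch_tick(len: interval)
	{
	# ... kick off one round of collection here ...
	schedule len { epoch_tick(len) };
	}

event bro_init()
	{
	schedule 5mins { epoch_tick(5mins) };
	}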
# This is unlikely to be called often, but it's here in case there are metrics
# This is unlikely to be called often, but it's here in case there are measurements
# being collected by managers.
function data_added(filter: Filter, index: Index, val: ResultVal)
function data_added(m: Measurement, key: Key, result: Result)
{
if ( check_thresholds(filter, index, val, 1.0) )
threshold_crossed(filter, index, val);
#if ( check_thresholds(m, key, val, 1.0) )
# threshold_crossed(m, key, val);
}
event Measurement::cluster_index_response(uid: string, id: string, filter_name: string, index: Index, val: ResultVal)
event Measurement::cluster_key_response(uid: string, mid: string, key: Key, result: Result)
{
#print fmt("%0.6f MANAGER: receiving index data from %s - %s=%s", network_time(), get_event_peer()$descr, index2str(index), val);
#print fmt("%0.6f MANAGER: receiving key data from %s - %s=%s", network_time(), get_event_peer()$descr, key2str(key), val);
# We only want to try and do a value merge if there are actually measured datapoints
# in the ResultVal.
if ( val$num > 0 && [uid, id, filter_name, index] in index_requests )
index_requests[uid, id, filter_name, index] = merge_result_vals(index_requests[uid, id, filter_name, index], val);
# in the Result.
if ( result$num > 0 && uid in key_requests )
key_requests[uid] = compose_resultvals(key_requests[uid], result);
else
index_requests[uid, id, filter_name, index] = val;
key_requests[uid] = result;
# Mark that this worker is done.
++done_with[uid];
@@ -219,27 +213,27 @@ event Measurement::cluster_index_response(uid: string, id: string, filter_name:
#print fmt("worker_count:%d :: done_with:%d", Cluster::worker_count, done_with[uid]);
if ( Cluster::worker_count == done_with[uid] )
{
local ir = index_requests[uid, id, filter_name, index];
if ( check_thresholds(filter_store[id, filter_name], index, ir, 1.0) )
{
threshold_crossed(filter_store[id, filter_name], index, ir);
}
local m = measurement_store[mid];
local ir = key_requests[uid];
if ( check_thresholds(m, key, ir, 1.0) )
threshold_crossed(m, key, ir);
delete done_with[uid];
delete index_requests[uid, id, filter_name, index];
delete key_requests[uid];
# Check that there is an outstanding view before subtracting.
if ( outstanding_global_views[id, filter_name] > 0 )
--outstanding_global_views[id, filter_name];
if ( outstanding_global_views[mid] > 0 )
--outstanding_global_views[mid];
}
}
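
Both response handlers use the same completion barrier: a per-uid counter compared against Cluster::worker_count and torn down once the last worker has answered. Reduced to just that piece, outside this diff (names are illustrative; the cluster framework is assumed loaded, as it already is for this script):

global responses_seen: table[string] of count &default=0 &read_expire=1min;

# Returns T exactly once per uid: when the final worker's answer arrives.
function all_workers_answered(uid: string): bool
	{
	++responses_seen[uid];
	if ( responses_seen[uid] < Cluster::worker_count )
		return F;

	delete responses_seen[uid];
	return T;
	}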
# Managers handle intermediate updates here.
event Measurement::cluster_index_intermediate_response(id: string, filter_name: string, index: Index)
event Measurement::cluster_key_intermediate_response(mid: string, key: Key)
{
#print fmt("MANAGER: receiving intermediate index data from %s", get_event_peer()$descr);
#print fmt("MANAGER: requesting index data for %s", index2str(index));
#print fmt("MANAGER: receiving intermediate key data from %s", get_event_peer()$descr);
#print fmt("MANAGER: requesting key data for %s", key2str(key));
if ( [id, filter_name] in outstanding_global_views &&
|outstanding_global_views[id, filter_name]| > max_outstanding_global_views )
if ( [mid] in outstanding_global_views &&
|outstanding_global_views[mid]| > max_outstanding_global_views )
{
# Don't do this intermediate update. Perhaps at some point in the future
# we will queue and randomly select from these ignored intermediate
@@ -247,38 +241,38 @@ event Measurement::cluster_index_intermediate_response(id: string, filter_name:
return;
}
++outstanding_global_views[id, filter_name];
++outstanding_global_views[mid];
local uid = unique_id("");
event Measurement::cluster_index_request(uid, id, filter_name, index);
event Measurement::cluster_key_request(uid, mid, key);
}
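
The throttle on intermediate updates is a per-measurement counter: bumped before the manager fires a cluster_key_request, checked against max_outstanding_global_views here, and decremented (guardedly) in cluster_key_response once the round completes. The two halves sit in separate handlers above; side by side, and outside this diff, the idea is:

const max_in_flight = 10 &redef;

global in_flight: table[string] of count &default=0;

# Called before requesting a global view for a key of measurement `mid`.
function may_request(mid: string): bool
	{
	if ( in_flight[mid] >= max_in_flight )
		return F;	# Too many outstanding requests; skip this update.
	++in_flight[mid];
	return T;
	}

# Called when the matching response round has finished.
function request_done(mid: string)
	{
	if ( in_flight[mid] > 0 )
		--in_flight[mid];
	}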
event Measurement::cluster_filter_response(uid: string, id: string, filter_name: string, data: MetricTable, done: bool)
event Measurement::cluster_measurement_response(uid: string, mid: string, data: ResultTable, done: bool)
{
#print fmt("MANAGER: receiving results from %s", get_event_peer()$descr);
# Mark another worker as being "done" for this uid.
if ( done )
++done_with[uid];
local local_data = filter_results[uid, id, filter_name];
local filter = filter_store[id, filter_name];
local local_data = measurement_results[uid];
local m = measurement_store[mid];
for ( index in data )
for ( key in data )
{
if ( index in local_data )
local_data[index] = merge_result_vals(local_data[index], data[index]);
if ( key in local_data )
local_data[key] = compose_resultvals(local_data[key], data[key]);
else
local_data[index] = data[index];
local_data[key] = data[key];
# If a filter is done being collected, thresholds for each index
# If a measurement is done being collected, thresholds for each key
# need to be checked so we're doing it here to avoid doubly iterating
# over each index.
# over each key.
if ( Cluster::worker_count == done_with[uid] )
{
if ( check_thresholds(filter, index, local_data[index], 1.0) )
if ( check_thresholds(m, key, local_data[key], 1.0) )
{
threshold_crossed(filter, index, local_data[index]);
threshold_crossed(m, key, local_data[key]);
}
}
}
@@ -286,22 +280,14 @@ event Measurement::cluster_filter_response(uid: string, id: string, filter_name:
# If the data has been collected from all peers, we are done and ready to finish.
if ( Cluster::worker_count == done_with[uid] )
{
local ts = network_time();
# Log the time this was initially requested if it's available.
if ( uid in requested_results )
{
ts = requested_results[uid];
delete requested_results[uid];
}
if ( filter?$period_finished )
filter$period_finished(ts, filter$id, filter$name, local_data);
if ( m?$period_finished )
m$period_finished(local_data);
# Clean up
delete filter_results[uid, id, filter_name];
delete measurement_results[uid];
delete done_with[uid];
# Not sure I need to reset the filter on the manager.
reset(filter);
# Not sure I need to reset the measurement on the manager.
reset(m);
}
}
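
Once every worker has reported, the manager hands the merged ResultTable straight to the measurement's optional period_finished callback; the request-timestamp bookkeeping from the old filter API is gone in this checkpoint. Assuming ResultTable maps a Key to a Result and is exported by the module (the record definitions are not part of this diff), such a callback would look roughly like:

# Illustrative only; the signature is inferred from the call site above.
function my_period_finished(data: Measurement::ResultTable)
	{
	for ( key in data )
		print key, data[key];
	}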