Mirror of https://github.com/zeek/zeek.git, synced 2025-10-02 06:38:20 +00:00.
Checkpoint for SumStats rename.

commit fbe967e16a (parent 8165d6077d)
32 changed files with 626 additions and 620 deletions
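
The heart of the change is an API rename: module Measurement becomes SumStats, the Measurement record becomes SumStat, DataPoint becomes Observation, Measurement::add_data() becomes SumStats::observe(), and the internal measurement_store becomes stats_store. For orientation, here is the entry-point call before and after, taken from the AppStats hunk further down (size is a local variable of that script):

    # Old API, removed in this commit:
    Measurement::add_data("apps.bytes", [$str="youtube"], [$num=size]);

    # New API, added in this commit:
    SumStats::observe("apps.bytes", [$str="youtube"], [$num=size]);
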
@@ -7,7 +7,7 @@
 @load base/frameworks/cluster
 @load ./main

-module Measurement;
+module SumStats;

 export {
 ## Allows a user to decide how large of result groups the
@@ -48,22 +48,21 @@ export {
 global cluster_key_request: event(uid: string, mid: string, key: Key);

 ## This event is sent by nodes in response to a
-## :bro:id:`Measurement::cluster_key_request` event.
+## :bro:id:`SumStats::cluster_key_request` event.
 global cluster_key_response: event(uid: string, mid: string, key: Key, result: Result);

 ## This is sent by workers to indicate that they crossed the percent of the
 ## current threshold by the percentage defined globally in
-## :bro:id:`Measurement::cluster_request_global_view_percent`
-global cluster_key_intermediate_response: event(mid: string, key: Measurement::Key);
+## :bro:id:`SumStats::cluster_request_global_view_percent`
+global cluster_key_intermediate_response: event(mid: string, key: SumStats::Key);

 ## This event is scheduled internally on workers to send result chunks.
 global send_data: event(uid: string, mid: string, data: ResultTable);
 }

 # Add events to the cluster framework to make this work.
-redef Cluster::manager2worker_events += /Measurement::cluster_(measurement_request|key_request)/;
-redef Cluster::manager2worker_events += /Measurement::new_measurement/;
-redef Cluster::worker2manager_events += /Measurement::cluster_(measurement_response|key_response|key_intermediate_response)/;
+redef Cluster::manager2worker_events += /SumStats::cluster_(measurement_request|key_request)/;
+redef Cluster::worker2manager_events += /SumStats::cluster_(measurement_response|key_response|key_intermediate_response)/;

 @if ( Cluster::local_node_type() != Cluster::MANAGER )
 # This variable is maintained to know what keys have recently sent as
@@ -75,32 +74,32 @@ global recent_global_view_keys: table[string, Key] of count &create_expire=1min
 event bro_init() &priority=-100
 {
 # The manager is the only host allowed to track these.
-measurement_store = table();
+stats_store = table();
 reducer_store = table();
 }

 # This is done on all non-manager node types in the event that a metric is
 # being collected somewhere other than a worker.
-function data_added(m: Measurement, key: Key, result: Result)
+function data_added(ss: SumStat, key: Key, result: Result)
 {
 # If an intermediate update for this value was sent recently, don't send
 # it again.
-if ( [m$id, key] in recent_global_view_keys )
+if ( [ss$id, key] in recent_global_view_keys )
 return;

 # If val is 5 and global view % is 0.1 (10%), pct_val will be 50. If that
 # crosses the full threshold then it's a candidate to send as an
 # intermediate update.
 if ( enable_intermediate_updates &&
-check_thresholds(m, key, result, cluster_request_global_view_percent) )
+check_thresholds(ss, key, result, cluster_request_global_view_percent) )
 {
 # kick off intermediate update
-event Measurement::cluster_key_intermediate_response(m$id, key);
-++recent_global_view_keys[m$id, key];
+event SumStats::cluster_key_intermediate_response(ss$id, key);
+++recent_global_view_keys[ss$id, key];
 }
 }

-event Measurement::send_data(uid: string, mid: string, data: ResultTable)
+event SumStats::send_data(uid: string, mid: string, data: ResultTable)
 {
 #print fmt("WORKER %s: sending data for uid %s...", Cluster::node, uid);

@@ -122,39 +121,39 @@ event Measurement::send_data(uid: string, mid: string, data: ResultTable)
 if ( |data| == 0 )
 done = T;

-event Measurement::cluster_measurement_response(uid, mid, local_data, done);
+event SumStats::cluster_measurement_response(uid, mid, local_data, done);
 if ( ! done )
-schedule 0.01 sec { Measurement::send_data(uid, mid, data) };
+schedule 0.01 sec { SumStats::send_data(uid, mid, data) };
 }

-event Measurement::cluster_measurement_request(uid: string, mid: string)
+event SumStats::cluster_measurement_request(uid: string, mid: string)
 {
 #print fmt("WORKER %s: received the cluster_measurement_request event for %s.", Cluster::node, id);

 # Initiate sending all of the data for the requested measurement.
 if ( mid in result_store )
-event Measurement::send_data(uid, mid, result_store[mid]);
+event SumStats::send_data(uid, mid, result_store[mid]);
 else
-event Measurement::send_data(uid, mid, table());
+event SumStats::send_data(uid, mid, table());

 # Lookup the actual measurement and reset it, the reference to the data
 # currently stored will be maintained internally by the send_data event.
-if ( mid in measurement_store )
-reset(measurement_store[mid]);
+if ( mid in stats_store )
+reset(stats_store[mid]);
 }

-event Measurement::cluster_key_request(uid: string, mid: string, key: Key)
+event SumStats::cluster_key_request(uid: string, mid: string, key: Key)
 {
 if ( mid in result_store && key in result_store[mid] )
 {
 #print fmt("WORKER %s: received the cluster_key_request event for %s=%s.", Cluster::node, key2str(key), data);
-event Measurement::cluster_key_response(uid, mid, key, result_store[mid][key]);
+event SumStats::cluster_key_response(uid, mid, key, result_store[mid][key]);
 }
 else
 {
 # We need to send an empty response if we don't have the data so that the manager
 # can know that it heard back from all of the workers.
-event Measurement::cluster_key_response(uid, mid, key, table());
+event SumStats::cluster_key_response(uid, mid, key, table());
 }
 }

@@ -166,7 +165,7 @@ event Measurement::cluster_key_request(uid: string, mid: string, key: Key)
 # This variable is maintained by manager nodes as they collect and aggregate
 # results.
 # Index on a uid.
-global measurement_results: table[string] of ResultTable &read_expire=1min;
+global stats_results: table[string] of ResultTable &read_expire=1min;

 # This variable is maintained by manager nodes to track how many "dones" they
 # collected per collection unique id. Once the number of results for a uid
@@ -189,7 +188,7 @@ global outstanding_global_views: table[string] of count &default=0;

 const zero_time = double_to_time(0.0);
 # Managers handle logging.
-event Measurement::finish_epoch(m: Measurement)
+event SumStats::finish_epoch(ss: SumStat)
 {
 if ( network_time() > zero_time )
 {
@@ -198,25 +197,25 @@ event Measurement::finish_epoch(m: Measurement)

 if ( uid in measurement_results )
 delete measurement_results[uid];
-measurement_results[uid] = table();
+stats_results[uid] = table();

 # Request data from peers.
-event Measurement::cluster_measurement_request(uid, m$id);
+event SumStats::cluster_measurement_request(uid, ss$id);
 }

 # Schedule the next finish_epoch event.
-schedule m$epoch { Measurement::finish_epoch(m) };
+schedule m$epoch { SumStats::finish_epoch(m) };
 }

 # This is unlikely to be called often, but it's here in case there are measurements
 # being collected by managers.
-function data_added(m: Measurement, key: Key, result: Result)
+function data_added(ss: SumStat, key: Key, result: Result)
 {
-if ( check_thresholds(m, key, result, 1.0) )
-threshold_crossed(m, key, result);
+if ( check_thresholds(ss, key, result, 1.0) )
+threshold_crossed(ss, key, result);
 }

-event Measurement::cluster_key_response(uid: string, mid: string, key: Key, result: Result)
+event SumStats::cluster_key_response(uid: string, ssid: string, key: Key, result: Result)
 {
 #print fmt("%0.6f MANAGER: receiving key data from %s - %s=%s", network_time(), get_event_peer()$descr, key2str(key), result);

@@ -233,26 +232,26 @@ event Measurement::cluster_key_response(uid: string, mid: string, key: Key, resu
 #print fmt("worker_count:%d :: done_with:%d", Cluster::worker_count, done_with[uid]);
 if ( Cluster::worker_count == done_with[uid] )
 {
-local m = measurement_store[mid];
+local ss = stats_store[ssid];
 local ir = key_requests[uid];
-if ( check_thresholds(m, key, ir, 1.0) )
-threshold_crossed(m, key, ir);
+if ( check_thresholds(ss, key, ir, 1.0) )
+threshold_crossed(ss, key, ir);

 delete done_with[uid];
 delete key_requests[uid];
 # Check that there is an outstanding view before subtracting.
-if ( outstanding_global_views[mid] > 0 )
---outstanding_global_views[mid];
+if ( outstanding_global_views[ssid] > 0 )
+--outstanding_global_views[ssid];
 }
 }

 # Managers handle intermediate updates here.
-event Measurement::cluster_key_intermediate_response(mid: string, key: Key)
+event SumStats::cluster_key_intermediate_response(ssid: string, key: Key)
 {
 #print fmt("MANAGER: receiving intermediate key data from %s", get_event_peer()$descr);
 #print fmt("MANAGER: requesting key data for %s", key2str(key));

-if ( mid in outstanding_global_views &&
+if ( ssid in outstanding_global_views &&
 |outstanding_global_views[mid]| > max_outstanding_global_views )
 {
 # Don't do this intermediate update. Perhaps at some point in the future
@@ -261,13 +260,13 @@ event Measurement::cluster_key_intermediate_response(mid: string, key: Key)
 return;
 }

-++outstanding_global_views[mid];
+++outstanding_global_views[ssid];

 local uid = unique_id("");
-event Measurement::cluster_key_request(uid, mid, key);
+event SumStats::cluster_key_request(uid, ssid, key);
 }

-event Measurement::cluster_measurement_response(uid: string, mid: string, data: ResultTable, done: bool)
+event SumStats::cluster_measurement_response(uid: string, ssid: string, data: ResultTable, done: bool)
 {
 #print fmt("MANAGER: receiving results from %s", get_event_peer()$descr);

@@ -275,8 +274,8 @@ event Measurement::cluster_measurement_response(uid: string, mid: string, data:
 if ( done )
 ++done_with[uid];

-local local_data = measurement_results[uid];
-local m = measurement_store[mid];
+local local_data = stats_results[uid];
+local ss = stats_store[ssid];

 for ( key in data )
 {
@@ -285,14 +284,14 @@ event Measurement::cluster_measurement_response(uid: string, mid: string, data:
 else
 local_data[key] = data[key];

-# If a measurement is done being collected, thresholds for each key
-# need to be checked so we're doing it here to avoid doubly iterating
-# over each key.
+# If a stat is done being collected, thresholds for each key
+# need to be checked so we're doing it here to avoid doubly
+# iterating over each key.
 if ( Cluster::worker_count == done_with[uid] )
 {
-if ( check_thresholds(m, key, local_data[key], 1.0) )
+if ( check_thresholds(ss, key, local_data[key], 1.0) )
 {
-threshold_crossed(m, key, local_data[key]);
+threshold_crossed(ss, key, local_data[key]);
 }
 }
 }
@@ -300,20 +299,20 @@ event Measurement::cluster_measurement_response(uid: string, mid: string, data:
 # If the data has been collected from all peers, we are done and ready to finish.
 if ( Cluster::worker_count == done_with[uid] )
 {
-if ( m?$epoch_finished )
-m$epoch_finished(local_data);
+if ( ss?$epoch_finished )
+ss$epoch_finished(local_data);

 # Clean up
-delete measurement_results[uid];
+delete stats_results[uid];
 delete done_with[uid];
 # Not sure I need to reset the measurement on the manager.
-reset(m);
+reset(ss);
 }
 }

 event remote_connection_handshake_done(p: event_peer) &priority=5
 {
-send_id(p, "Measurement::measurement_store");
-send_id(p, "Measurement::reducer_store");
+send_id(p, "SumStats::stats_store");
+send_id(p, "SumStats::reducer_store");
 }
 @endif

@@ -1,6 +1,8 @@
-##! The measurement framework provides a way to count and measure data.
+##! The summary statistics framework provides a way to
+##! summarize large streams of data into simple reduced
+##! measurements.

-module Measurement;
+module SumStats;

 export {
 ## The various calculations are all defined as plugins.
@@ -8,14 +10,17 @@ export {
 PLACEHOLDER
 };

-## Represents a thing which is having measurement results collected for it.
+## Represents a thing which is having summarization
+## results collected for it.
 type Key: record {
-## A non-address related measurement or a sub-key for an address based measurement.
-## An example might be successful SSH connections by client IP address
+## A non-address related summarization or a sub-key for
+## an address based summarization. An example might be
+## successful SSH connections by client IP address
 ## where the client string would be the key value.
-## Another example might be number of HTTP requests to a particular
-## value in a Host header. This is an example of a non-host based
-## metric since multiple IP addresses could respond for the same Host
+## Another example might be number of HTTP requests to
+## a particular value in a Host header. This is an
+## example of a non-host based metric since multiple
+## IP addresses could respond for the same Host
 ## header value.
 str: string &optional;

@@ -23,9 +28,9 @@ export {
 host: addr &optional;
 };

-## Represents data being added for a single metric data point.
-## Only supply a single value here at a time.
-type DataPoint: record {
+## Represents data being added for a single observation.
+## Only supply a single field at a time!
+type Observation: record {
 ## Count value.
 num: count &optional;
 ## Double value.
@@ -35,102 +40,110 @@ export {
 };

 type Reducer: record {
-## Data stream identifier for the reducer to attach to.
+## Observation stream identifier for the reducer
+## to attach to.
 stream: string;

 ## The calculations to perform on the data points.
 apply: set[Calculation];

-## A predicate so that you can decide per key if you would like
-## to accept the data being inserted.
-pred: function(key: Measurement::Key, point: Measurement::DataPoint): bool &optional;
+## A predicate so that you can decide per key if you
+## would like to accept the data being inserted.
+pred: function(key: SumStats::Key, obs: SumStats::Observation): bool &optional;

 ## A function to normalize the key. This can be used to aggregate or
 ## normalize the entire key.
-normalize_key: function(key: Measurement::Key): Key &optional;
+normalize_key: function(key: SumStats::Key): Key &optional;
 };

-## Value calculated for a data point stream fed into a reducer.
+## Value calculated for an observation stream fed into a reducer.
 ## Most of the fields are added by plugins.
 type ResultVal: record {
-## The time when the first data point was added to this result value.
+## The time when the first observation was added to
+## this result value.
 begin: time;

-## The time when the last data point was added to this result value.
+## The time when the last observation was added to
+## this result value.
 end: time;

-## The number of measurements received.
+## The number of observations received.
 num: count &default=0;
 };

 ## Type to store results for multiple reducers.
 type Result: table[string] of ResultVal;

-## Type to store a table of measurement results indexed by the measurement key.
+## Type to store a table of sumstats results indexed
+## by keys.
 type ResultTable: table[Key] of Result;

-## Measurements represent an aggregation of reducers along with
+## SumStats represent an aggregation of reducers along with
 ## mechanisms to handle various situations like the epoch ending
 ## or thresholds being crossed.
-type Measurement: record {
-## The interval at which this filter should be "broken" and the
-## '$epoch_finished' callback called. The results are also reset
-## at this time so any threshold based detection needs to be set to a
-## number that should be expected to happen within this epoch.
+## It's best to not access any global state outside
+## of the variables given to the callbacks because there
+## is no assurance provided as to where the callbacks
+## will be executed on clusters.
+type SumStat: record {
+## The interval at which this filter should be "broken"
+## and the '$epoch_finished' callback called. The
+## results are also reset at this time so any threshold
+## based detection needs to be set to a
+## value that should be expected to happen within
+## this epoch.
 epoch: interval;

-## The reducers for the measurement indexed by data id.
+## The reducers for the SumStat
 reducers: set[Reducer];

-## Provide a function to calculate a value from the :bro:see:`Result`
-## structure which will be used for thresholding.
-threshold_val: function(key: Measurement::Key, result: Measurement::Result): count &optional;
+## Provide a function to calculate a value from the
+## :bro:see:`Result` structure which will be used
+## for thresholding.
+## This is required if a $threshold value is given.
+threshold_val: function(key: SumStats::Key, result: SumStats::Result): count &optional;

-## The threshold value for calling the $threshold_crossed callback.
+## The threshold value for calling the
+## $threshold_crossed callback.
 threshold: count &optional;

-## A series of thresholds for calling the $threshold_crossed callback.
+## A series of thresholds for calling the
+## $threshold_crossed callback.
 threshold_series: vector of count &optional;

 ## A callback that is called when a threshold is crossed.
-threshold_crossed: function(key: Measurement::Key, result: Measurement::Result) &optional;
+threshold_crossed: function(key: SumStats::Key, result: SumStats::Result) &optional;

-## A callback with the full collection of Results for this filter.
-## It's best to not access any global state outside of the variables
-## given to the callback because there is no assurance provided as to
-## where the callback will be executed on clusters.
-epoch_finished: function(rt: Measurement::ResultTable) &optional;
+## A callback with the full collection of Results for
+## this SumStat.
+epoch_finished: function(rt: SumStats::ResultTable) &optional;
 };

-## Create a measurement.
-global create: function(m: Measurement::Measurement);
+## Create a summary statistic.
+global create: function(m: SumStats::SumStat);

-## Add data into a data point stream. This should be called when
-## a script has measured some point value.
+## Add data into an observation stream. This should be
+## called when a script has measured some point value.
 ##
-## id: The stream identifier that the data point represents.
+## id: The observation stream identifier that the data
+## point represents.
 ##
-## key: The measurement key that the value is to be added to.
+## key: The key that the value is related to.
 ##
-## point: The data point to send into the stream.
-global add_data: function(id: string, key: Measurement::Key, point: Measurement::DataPoint);
+## obs: The data point to send into the stream.
+global observe: function(id: string, key: SumStats::Key, obs: SumStats::Observation);

-## Helper function to represent a :bro:type:`Measurement::Key` value as
+## Helper function to represent a :bro:type:`SumStats::Key` value as
 ## a simple string.
 ##
 ## key: The metric key that is to be converted into a string.
 ##
 ## Returns: A string representation of the metric key.
-global key2str: function(key: Measurement::Key): string;
+global key2str: function(key: SumStats::Key): string;

-## This event is generated for each new measurement that is created.
-##
-## m: The record which describes a measurement.
-global new_measurement: event(m: Measurement);
 }

 redef record Reducer += {
-# Internal use only. Provides a reference back to the related Measurement by it's ID.
+# Internal use only. Provides a reference back to the related SumStats by it's ID.
 mid: string &optional;
 };

@@ -142,16 +155,16 @@ type Thresholding: record {
 threshold_series_index: count &default=0;
 };

-# Internal use only. For tracking thresholds per measurement and key.
+# Internal use only. For tracking thresholds per sumstat and key.
 global threshold_tracker: table[string] of table[Key] of Thresholding &optional;

-redef record Measurement += {
+redef record SumStats += {
 # Internal use only (mostly for cluster coherency).
 id: string &optional;
 };

-# Store of measurements indexed on the measurement id.
-global measurement_store: table[string] of Measurement = table();
+# Store of sumstats indexed on the sumstat id.
+global stats_store: table[string] of SumStats = table();

 # Store of reducers indexed on the data point stream id.
 global reducer_store: table[string] of set[Reducer] = table();
@@ -166,10 +179,10 @@ global thresholds_store: table[string, Key] of bool = table();
 # key values are updated and the new val is given as the `val` argument.
 # It's only prototyped here because cluster and non-cluster have separate
 # implementations.
-global data_added: function(m: Measurement, key: Key, result: Result);
+global data_added: function(m: SumStats, key: Key, result: Result);

 # Prototype the hook point for plugins to do calculations.
-global add_to_reducer_hook: hook(r: Reducer, val: double, data: DataPoint, rv: ResultVal);
+global add_to_reducer_hook: hook(r: Reducer, val: double, data: Observation, rv: ResultVal);
 # Prototype the hook point for plugins to initialize any result values.
 global init_resultval_hook: hook(r: Reducer, rv: ResultVal);
 # Prototype the hook point for plugins to merge Results.
@@ -177,7 +190,7 @@ global compose_resultvals_hook: hook(result: ResultVal, rv1: ResultVal, rv2: Res

 # Event that is used to "finish" measurements and adapt the measurement
 # framework for clustered or non-clustered usage.
-global finish_epoch: event(m: Measurement);
+global finish_epoch: event(m: SumStats);

 function key2str(key: Key): string
 {
@@ -186,7 +199,7 @@ function key2str(key: Key): string
 out = fmt("%shost=%s", out, key$host);
 if ( key?$str )
 out = fmt("%s%sstr=%s", out, |out|==0 ? "" : ", ", key$str);
-return fmt("measurement_key(%s)", out);
+return fmt("sumstats_key(%s)", out);
 }

 function init_resultval(r: Reducer): ResultVal
@@ -200,17 +213,12 @@ function compose_resultvals(rv1: ResultVal, rv2: ResultVal): ResultVal
 {
 local result: ResultVal;

-# Merge $begin (take the earliest one)
 result$begin = (rv1$begin < rv2$begin) ? rv1$begin : rv2$begin;

-# Merge $end (take the latest one)
 result$end = (rv1$end > rv2$end) ? rv1$end : rv2$end;

-# Merge $num
 result$num = rv1$num + rv2$num;

+# Run the plugin composition hooks.
 hook compose_resultvals_hook(result, rv1, rv2);

 return result;
 }

@@ -243,59 +251,59 @@ function compose_results(r1: Result, r2: Result): Result
 }


-function reset(m: Measurement)
+function reset(ss: SumStat)
 {
-if ( m$id in result_store )
-delete result_store[m$id];
+if ( ss$id in result_store )
+delete result_store[ss$id];

-result_store[m$id] = table();
-threshold_tracker[m$id] = table();
+result_store[ss$id] = table();
+threshold_tracker[ss$id] = table();
 }

-function create(m: Measurement)
+function create(ss: SumStat)
 {
-if ( (m?$threshold || m?$threshold_series) && ! m?$threshold_val )
+if ( (ss?$threshold || ss?$threshold_series) && ! ss?$threshold_val )
 {
-Reporter::error("Measurement given a threshold with no $threshold_val function");
+Reporter::error("SumStats given a threshold with no $threshold_val function");
 }

-if ( ! m?$id )
-m$id=unique_id("");
-threshold_tracker[m$id] = table();
-measurement_store[m$id] = m;
+if ( ! ss?$id )
+ss$id=unique_id("");
+threshold_tracker[ss$id] = table();
+stats_store[ss$id] = ss;

-for ( reducer in m$reducers )
+for ( reducer in ss$reducers )
 {
-reducer$mid = m$id;
+reducer$mid = ss$id;
 if ( reducer$stream !in reducer_store )
 reducer_store[reducer$stream] = set();
 add reducer_store[reducer$stream][reducer];
 }

-reset(m);
-schedule m$epoch { Measurement::finish_epoch(m) };
+reset(ss);
+schedule ss$epoch { SumStats::finish_epoch(ss) };
 }

-function add_data(id: string, key: Key, point: DataPoint)
+function observe(id: string, key: Key, obs: Observation)
 {
-# Try to add the data to all of the defined reducers.
 if ( id !in reducer_store )
 return;

+# Try to add the data to all of the defined reducers.
 for ( r in reducer_store[id] )
 {
 # If this reducer has a predicate, run the predicate
 # and skip this key if the predicate return false.
-if ( r?$pred && ! r$pred(key, point) )
+if ( r?$pred && ! r$pred(key, obs) )
 next;

 if ( r?$normalize_key )
 key = r$normalize_key(copy(key));

-local m = measurement_store[r$mid];
+local ss = stats_store[r$mid];

 if ( r$mid !in result_store )
-result_store[m$id] = table();
+result_store[ss$id] = table();
 local results = result_store[r$mid];

 if ( key !in results )
@@ -312,56 +320,56 @@ function add_data(id: string, key: Key, point: DataPoint)

 # If a string was given, fall back to 1.0 as the value.
 local val = 1.0;
-if ( point?$num || point?$dbl )
-val = point?$dbl ? point$dbl : point$num;
+if ( obs?$num || obs?$dbl )
+val = obs?$dbl ? obs$dbl : obs$num;

-hook add_to_reducer_hook(r, val, point, result_val);
-data_added(m, key, result);
+hook add_to_reducer_hook(r, val, obs, result_val);
+data_added(ss, key, result);
 }
 }

 # This function checks if a threshold has been crossed. It is also used as a method to implement
 # mid-break-interval threshold crossing detection for cluster deployments.
-function check_thresholds(m: Measurement, key: Key, result: Result, modify_pct: double): bool
+function check_thresholds(ss: SumStat, key: Key, result: Result, modify_pct: double): bool
 {
-if ( ! (m?$threshold || m?$threshold_series) )
+if ( ! (ss?$threshold || ss?$threshold_series) )
 return F;

 # Add in the extra ResultVals to make threshold_vals easier to write.
-if ( |m$reducers| != |result| )
+if ( |ss$reducers| != |result| )
 {
-for ( reducer in m$reducers )
+for ( reducer in ss$reducers )
 {
 if ( reducer$stream !in result )
 result[reducer$stream] = init_resultval(reducer);
 }
 }

-local watch = m$threshold_val(key, result);
+local watch = ss$threshold_val(key, result);

 if ( modify_pct < 1.0 && modify_pct > 0.0 )
 watch = double_to_count(floor(watch/modify_pct));

-if ( m$id !in threshold_tracker )
-threshold_tracker[m$id] = table();
-local t_tracker = threshold_tracker[m$id];
+if ( ss$id !in threshold_tracker )
+threshold_tracker[ss$id] = table();
+local t_tracker = threshold_tracker[ss$id];

 if ( key !in t_tracker )
 {
 local ttmp: Thresholding;
 t_tracker[key] = ttmp;
 }
-local tt = threshold_tracker[m$id][key];
+local tt = t_tracker[key];

-if ( m?$threshold && ! tt$is_threshold_crossed && watch >= m$threshold )
+if ( ss?$threshold && ! tt$is_threshold_crossed && watch >= ss$threshold )
 {
 # Value crossed the threshold.
 return T;
 }

-if ( m?$threshold_series &&
-|m$threshold_series| >= tt$threshold_series_index &&
-watch >= m$threshold_series[tt$threshold_series_index] )
+if ( ss?$threshold_series &&
+|ss$threshold_series| >= tt$threshold_series_index &&
+watch >= ss$threshold_series[tt$threshold_series_index] )
 {
 # A threshold series was given and the value crossed the next
 # value in the series.
@@ -371,28 +379,28 @@ function check_thresholds(m: Measurement, key: Key, result: Result, modify_pct:
 return F;
 }

-function threshold_crossed(m: Measurement, key: Key, result: Result)
+function threshold_crossed(ss: SumStat, key: Key, result: Result)
 {
 # If there is no callback, there is no point in any of this.
-if ( ! m?$threshold_crossed )
+if ( ! ss?$threshold_crossed )
 return;

 # Add in the extra ResultVals to make threshold_crossed callbacks easier to write.
-if ( |m$reducers| != |result| )
+if ( |ss$reducers| != |result| )
 {
-for ( reducer in m$reducers )
+for ( reducer in ss$reducers )
 {
 if ( reducer$stream !in result )
 result[reducer$stream] = init_resultval(reducer);
 }
 }

-m$threshold_crossed(key, result);
-local tt = threshold_tracker[m$id][key];
+ss$threshold_crossed(key, result);
+local tt = threshold_tracker[ss$id][key];
 tt$is_threshold_crossed = T;

 # Bump up to the next threshold series index if a threshold series is being used.
-if ( m?$threshold_series )
+if ( ss?$threshold_series )
 ++tt$threshold_series_index;
 }

@@ -1,8 +1,8 @@
 @load ./main

-module Measurement;
+module SumStats;

-event Measurement::finish_epoch(m: Measurement)
+event SumStats::finish_epoch(m: SumStats)
 {
 if ( m$id in result_store )
 {
@@ -13,11 +13,11 @@ event Measurement::finish_epoch(m: Measurement)
 reset(m);
 }

-schedule m$epoch { Measurement::finish_epoch(m) };
+schedule m$epoch { SumStats::finish_epoch(m) };
 }


-function data_added(m: Measurement, key: Key, result: Result)
+function data_added(m: SumStats, key: Key, result: Result)
 {
 if ( check_thresholds(m, key, result, 1.0) )
 threshold_crossed(m, key, result);

@@ -2,7 +2,7 @@
 @load ./max
 @load ./min
 @load ./sample
+@load ./variance
 @load ./std-dev
 @load ./sum
 @load ./unique
-@load ./variance

@@ -1,6 +1,6 @@
-@load base/frameworks/measurement
+@load base/frameworks/sumstats

-module Measurement;
+module SumStats;

 export {
 redef enum Calculation += {
@@ -14,7 +14,7 @@ export {
 };
 }

-hook add_to_reducer_hook(r: Reducer, val: double, data: DataPoint, rv: ResultVal)
+hook add_to_reducer_hook(r: Reducer, val: double, data: Observation, rv: ResultVal)
 {
 if ( AVERAGE in r$apply )
 {

@@ -1,6 +1,6 @@
-@load base/frameworks/measurement
+@load base/frameworks/sumstats

-module Measurement;
+module SumStats;

 export {
 redef enum Calculation += {
@@ -14,7 +14,7 @@ export {
 };
 }

-hook add_to_reducer_hook(r: Reducer, val: double, data: DataPoint, rv: ResultVal)
+hook add_to_reducer_hook(r: Reducer, val: double, data: Observation, rv: ResultVal)
 {
 if ( MAX in r$apply )
 {

@@ -1,6 +1,6 @@
-@load base/frameworks/measurement
+@load base/frameworks/sumstats

-module Measurement;
+module SumStats;

 export {
 redef enum Calculation += {
@@ -14,7 +14,7 @@ export {
 };
 }

-hook add_to_reducer_hook(r: Reducer, val: double, data: DataPoint, rv: ResultVal)
+hook add_to_reducer_hook(r: Reducer, val: double, data: Observation, rv: ResultVal)
 {
 if ( MIN in r$apply )
 {

@@ -1,35 +1,35 @@
-@load base/frameworks/measurement
+@load base/frameworks/sumstats
 @load base/utils/queue

-module Measurement;
+module SumStats;

 export {
 redef record Reducer += {
-## A number of sample DataPoints to collect.
+## A number of sample Observations to collect.
 samples: count &default=0;
 };

 redef record ResultVal += {
 ## This is the queue where samples
 ## are maintained. Use the
-## :bro:see:`Measurement::get_samples` function
+## :bro:see:`SumStats::get_samples` function
 ## to get a vector of the samples.
 samples: Queue::Queue &optional;
 };

-## Get a vector of sample DataPoint values from a ResultVal.
-global get_samples: function(rv: ResultVal): vector of DataPoint;
+## Get a vector of sample Observation values from a ResultVal.
+global get_samples: function(rv: ResultVal): vector of Observation;
 }

-function get_samples(rv: ResultVal): vector of DataPoint
+function get_samples(rv: ResultVal): vector of Observation
 {
-local s: vector of DataPoint = vector();
+local s: vector of Observation = vector();
 if ( rv?$samples )
 Queue::get_vector(rv$samples, s);
 return s;
 }

-hook add_to_reducer_hook(r: Reducer, val: double, data: DataPoint, rv: ResultVal)
+hook add_to_reducer_hook(r: Reducer, val: double, data: Observation, rv: ResultVal)
 {
 if ( r$samples > 0 )
 {

@@ -1,7 +1,7 @@
 @load ./variance
-@load base/frameworks/measurement
+@load base/frameworks/sumstats

-module Measurement;
+module SumStats;

 export {
 redef enum Calculation += {
@@ -22,7 +22,7 @@ function calc_std_dev(rv: ResultVal)
 }

 # This depends on the variance plugin which uses priority -5
-hook add_to_reducer_hook(r: Reducer, val: double, data: DataPoint, rv: ResultVal) &priority=-10
+hook add_to_reducer_hook(r: Reducer, val: double, data: Observation, rv: ResultVal) &priority=-10
 {
 if ( STD_DEV in r$apply )
 {

@@ -1,6 +1,6 @@
-@load base/frameworks/measurement
+@load base/frameworks/sumstats

-module Measurement;
+module SumStats;

 export {
 redef enum Calculation += {
@@ -14,13 +14,13 @@ export {
 sum: double &default=0.0;
 };

-type threshold_function: function(key: Measurement::Key, result: Measurement::Result): count;
+type threshold_function: function(key: SumStats::Key, result: SumStats::Result): count;
 global sum_threshold: function(data_id: string): threshold_function;
 }

 function sum_threshold(data_id: string): threshold_function
 {
-return function(key: Measurement::Key, result: Measurement::Result): count
+return function(key: SumStats::Key, result: SumStats::Result): count
 {
 print fmt("data_id: %s", data_id);
 print result;
@@ -34,7 +34,7 @@ hook init_resultval_hook(r: Reducer, rv: ResultVal)
 rv$sum = 0;
 }

-hook add_to_reducer_hook(r: Reducer, val: double, data: DataPoint, rv: ResultVal)
+hook add_to_reducer_hook(r: Reducer, val: double, data: Observation, rv: ResultVal)
 {
 if ( SUM in r$apply )
 rv$sum += val;

@@ -1,6 +1,6 @@
-@load base/frameworks/measurement
+@load base/frameworks/sumstats

-module Measurement;
+module SumStats;

 export {
 redef enum Calculation += {
@@ -20,10 +20,10 @@ redef record ResultVal += {
 # because we don't want to trust that we can inspect the values
 # since we will like move to a probalistic data structure in the future.
 # TODO: in the future this will optionally be a hyperloglog structure
-unique_vals: set[DataPoint] &optional;
+unique_vals: set[Observation] &optional;
 };

-hook add_to_reducer_hook(r: Reducer, val: double, data: DataPoint, rv: ResultVal)
+hook add_to_reducer_hook(r: Reducer, val: double, data: Observation, rv: ResultVal)
 {
 if ( UNIQUE in r$apply )
 {

@@ -1,7 +1,7 @@
 @load ./average
-@load base/frameworks/measurement
+@load base/frameworks/sumstats

-module Measurement;
+module SumStats;

 export {
 redef enum Calculation += {
@@ -29,7 +29,7 @@ function calc_variance(rv: ResultVal)
 }

 # Reduced priority since this depends on the average
-hook add_to_reducer_hook(r: Reducer, val: double, data: DataPoint, rv: ResultVal) &priority=-5
+hook add_to_reducer_hook(r: Reducer, val: double, data: Observation, rv: ResultVal) &priority=-5
 {
 if ( VARIANCE in r$apply )
 {

@@ -5,6 +5,7 @@
 ##! Requires that :bro:id:`use_conn_size_analyzer` is set to T! The heuristic
 ##! is not attempted if the connection size analyzer isn't enabled.

+@load base/protocols/conn
 @load base/frameworks/notice
 @load base/utils/site
 @load base/utils/thresholds
@@ -115,7 +116,7 @@ function check_ssh_connection(c: connection, done: bool)
 # Responder must have sent fewer than 40 packets.
 c$resp$num_pkts < 40 &&
 # If there was a content gap we can't reliably do this heuristic.
-c$conn$missed_bytes == 0)# &&
+c?$conn && c$conn$missed_bytes == 0)# &&
 # Only "normal" connections can count.
 #c$conn?$conn_state && c$conn$conn_state in valid_states )
 {

@ -1,8 +1,8 @@
|
||||||
@load base/protocols/http
|
@load base/protocols/http
|
||||||
@load base/protocols/ssl
|
@load base/protocols/ssl
|
||||||
@load base/frameworks/measurement
|
@load base/frameworks/sumstats
|
||||||
|
|
||||||
module AppMeasurement;
|
module AppStats;
|
||||||
|
|
||||||
export {
|
export {
|
||||||
redef enum Log::ID += { LOG };
|
redef enum Log::ID += { LOG };
|
||||||
|
@ -32,60 +32,60 @@ redef record connection += {
|
||||||
|
|
||||||
event bro_init() &priority=3
|
event bro_init() &priority=3
|
||||||
{
|
{
|
||||||
Log::create_stream(AppMeasurement::LOG, [$columns=Info]);
|
Log::create_stream(AppSumStats::LOG, [$columns=Info]);
|
||||||
|
|
||||||
local r1: Measurement::Reducer = [$stream="apps.bytes", $apply=set(Measurement::SUM)];
|
+local r1: SumStats::Reducer = [$stream="apps.bytes", $apply=set(SumStats::SUM)];
-local r2: Measurement::Reducer = [$stream="apps.hits", $apply=set(Measurement::UNIQUE)];
+local r2: SumStats::Reducer = [$stream="apps.hits", $apply=set(SumStats::UNIQUE)];
-Measurement::create([$epoch=break_interval,
+SumStats::create([$epoch=break_interval,
 $reducers=set(r1, r2),
-$epoch_finished(data: Measurement::ResultTable) =
+$epoch_finished(data: SumStats::ResultTable) =
 {
 local l: Info;
 l$ts = network_time();
 l$ts_delta = break_interval;
 for ( key in data )
 {
 local result = data[key];
 l$app = key$str;
 l$bytes = double_to_count(floor(result["apps.bytes"]$sum));
 l$hits = result["apps.hits"]$num;
 l$uniq_hosts = result["apps.hits"]$unique;
 Log::write(LOG, l);
 }
 }]);
 }

-function do_measurement(id: conn_id, hostname: string, size: count)
+function add_sumstats(id: conn_id, hostname: string, size: count)
 {
 if ( /\.youtube\.com$/ in hostname && size > 512*1024 )
 {
-Measurement::add_data("apps.bytes", [$str="youtube"], [$num=size]);
+SumStats::observe("apps.bytes", [$str="youtube"], [$num=size]);
-Measurement::add_data("apps.hits", [$str="youtube"], [$str=cat(id$orig_h)]);
+SumStats::observe("apps.hits", [$str="youtube"], [$str=cat(id$orig_h)]);
 }
 else if ( /(\.facebook\.com|\.fbcdn\.net)$/ in hostname && size > 20 )
 {
-Measurement::add_data("apps.bytes", [$str="facebook"], [$num=size]);
+SumStats::observe("apps.bytes", [$str="facebook"], [$num=size]);
-Measurement::add_data("apps.hits", [$str="facebook"], [$str=cat(id$orig_h)]);
+SumStats::observe("apps.hits", [$str="facebook"], [$str=cat(id$orig_h)]);
 }
 else if ( /\.google\.com$/ in hostname && size > 20 )
 {
-Measurement::add_data("apps.bytes", [$str="google"], [$num=size]);
+SumStats::observe("apps.bytes", [$str="google"], [$num=size]);
-Measurement::add_data("apps.hits", [$str="google"], [$str=cat(id$orig_h)]);
+SumStats::observe("apps.hits", [$str="google"], [$str=cat(id$orig_h)]);
 }
 else if ( /\.nflximg\.com$/ in hostname && size > 200*1024 )
 {
-Measurement::add_data("apps.bytes", [$str="netflix"], [$num=size]);
+SumStats::observe("apps.bytes", [$str="netflix"], [$num=size]);
-Measurement::add_data("apps.hits", [$str="netflix"], [$str=cat(id$orig_h)]);
+SumStats::observe("apps.hits", [$str="netflix"], [$str=cat(id$orig_h)]);
 }
 else if ( /\.(pandora|p-cdn)\.com$/ in hostname && size > 512*1024 )
 {
-Measurement::add_data("apps.bytes", [$str="pandora"], [$num=size]);
+SumStats::observe("apps.bytes", [$str="pandora"], [$num=size]);
-Measurement::add_data("apps.hits", [$str="pandora"], [$str=cat(id$orig_h)]);
+SumStats::observe("apps.hits", [$str="pandora"], [$str=cat(id$orig_h)]);
 }
 else if ( /\.gmail\.com$/ in hostname && size > 20 )
 {
-Measurement::add_data("apps.bytes", [$str="gmail"], [$num=size]);
+SumStats::observe("apps.bytes", [$str="gmail"], [$num=size]);
-Measurement::add_data("apps.hits", [$str="gmail"], [$str=cat(id$orig_h)]);
+SumStats::observe("apps.hits", [$str="gmail"], [$str=cat(id$orig_h)]);
 }
 }

@@ -99,11 +99,11 @@ event ssl_established(c: connection)
 event connection_finished(c: connection)
 {
 if ( c?$resp_hostname )
-do_measurement(c$id, c$resp_hostname, c$resp$size);
+add_sumstats(c$id, c$resp_hostname, c$resp$size);
 }

 event HTTP::log_http(rec: HTTP::Info)
 {
 if( rec?$host )
-do_measurement(rec$id, rec$host, rec$response_body_len);
+add_sumstats(rec$id, rec$host, rec$response_body_len);
 }
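For illustration, a minimal sketch of how the renamed API above fits together, with a placeholder stream name ("example.bytes") and epoch that are not taken from the commit:

event bro_init()
	{
	local r: SumStats::Reducer = [$stream="example.bytes", $apply=set(SumStats::SUM)];
	SumStats::create([$epoch=1min,
	                  $reducers=set(r),
	                  # Called once per epoch with the accumulated results.
	                  $epoch_finished(data: SumStats::ResultTable) =
	                  	{
	                  	for ( key in data )
	                  		print fmt("%s: %.1f bytes", key$str, data[key]["example.bytes"]$sum);
	                  	}]);
	}

event HTTP::log_http(rec: HTTP::Info)
	{
	# Feed one observation per logged HTTP response into the stream.
	if ( rec?$host )
		SumStats::observe("example.bytes", [$str=rec$host], [$num=rec$response_body_len]);
	}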
@@ -2,7 +2,7 @@
 ##! toward hosts that have sent low TTL packets.
 ##! It generates a notice when the number of ICMP Time Exceeded
 ##! messages for a source-destination pair exceeds threshold
-@load base/frameworks/measurement
+@load base/frameworks/sumstats
 @load base/frameworks/signatures
 @load-sigs ./detect-low-ttls.sig

@@ -53,41 +53,41 @@ event bro_init() &priority=5
 {
 Log::create_stream(Traceroute::LOG, [$columns=Info, $ev=log_traceroute]);

-local r1: Measurement::Reducer = [$stream="traceroute.time_exceeded", $apply=set(Measurement::UNIQUE)];
+local r1: SumStats::Reducer = [$stream="traceroute.time_exceeded", $apply=set(SumStats::UNIQUE)];
-local r2: Measurement::Reducer = [$stream="traceroute.low_ttl_packet", $apply=set(Measurement::SUM)];
+local r2: SumStats::Reducer = [$stream="traceroute.low_ttl_packet", $apply=set(SumStats::SUM)];
-Measurement::create([$epoch=icmp_time_exceeded_interval,
+SumStats::create([$epoch=icmp_time_exceeded_interval,
 $reducers=set(r1, r2),
-$threshold_val(key: Measurement::Key, result: Measurement::Result) =
+$threshold_val(key: SumStats::Key, result: SumStats::Result) =
 {
 # Give a threshold value of zero depending on if the host
 # sends a low ttl packet.
 if ( require_low_ttl_packets && result["traceroute.low_ttl_packet"]$sum == 0 )
 return 0;
 else
 return result["traceroute.time_exceeded"]$unique;
 },
 $threshold=icmp_time_exceeded_threshold,
-$threshold_crossed(key: Measurement::Key, result: Measurement::Result) =
+$threshold_crossed(key: SumStats::Key, result: SumStats::Result) =
 {
 local parts = split1(key$str, /-/);
 local src = to_addr(parts[1]);
 local dst = to_addr(parts[2]);
 Log::write(LOG, [$ts=network_time(), $src=src, $dst=dst]);
 NOTICE([$note=Traceroute::Detected,
 $msg=fmt("%s seems to be running traceroute", src),
 $src=src, $dst=dst,
 $identifier=cat(src)]);
 }]);
 }

 # Low TTL packets are detected with a signature.
 event signature_match(state: signature_state, msg: string, data: string)
 {
 if ( state$sig_id == /traceroute-detector.*/ )
-Measurement::add_data("traceroute.low_ttl_packet", [$str=cat(state$conn$id$orig_h,"-",state$conn$id$resp_h)], [$num=1]);
+SumStats::observe("traceroute.low_ttl_packet", [$str=cat(state$conn$id$orig_h,"-",state$conn$id$resp_h)], [$num=1]);
 }

 event icmp_time_exceeded(c: connection, icmp: icmp_conn, code: count, context: icmp_context)
 {
-Measurement::add_data("traceroute.time_exceeded", [$str=cat(context$id$orig_h,"-",context$id$resp_h)], [$str=cat(c$id$orig_h)]);
+SumStats::observe("traceroute.time_exceeded", [$str=cat(context$id$orig_h,"-",context$id$resp_h)], [$str=cat(c$id$orig_h)]);
 }
@@ -5,7 +5,7 @@
 ##! All the authors of the old scan.bro

 @load base/frameworks/notice
-@load base/frameworks/measurement
+@load base/frameworks/sumstats

 @load base/utils/time

@@ -52,7 +52,7 @@ export {
 }

-#function check_addr_scan_threshold(key: Measurement::Key, val: Measurement::Result): bool
+#function check_addr_scan_threshold(key: SumStats::Key, val: SumStats::Result): bool
 # {
 # # We don't need to do this if no custom thresholds are defined.
 # if ( |addr_scan_custom_thresholds| == 0 )

@@ -65,54 +65,54 @@ export {

 event bro_init() &priority=5
 {
-local r1: Measurement::Reducer = [$stream="scan.addr.fail", $apply=set(Measurement::UNIQUE)];
+local r1: SumStats::Reducer = [$stream="scan.addr.fail", $apply=set(SumStats::UNIQUE)];
-Measurement::create([$epoch=addr_scan_interval,
+SumStats::create([$epoch=addr_scan_interval,
 $reducers=set(r1),
-$threshold_val(key: Measurement::Key, result: Measurement::Result) =
+$threshold_val(key: SumStats::Key, result: SumStats::Result) =
 {
 return double_to_count(result["scan.addr.fail"]$unique);
 },
 #$threshold_func=check_addr_scan_threshold,
 $threshold=addr_scan_threshold,
-$threshold_crossed(key: Measurement::Key, result: Measurement::Result) =
+$threshold_crossed(key: SumStats::Key, result: SumStats::Result) =
 {
 local r = result["scan.addr.fail"];
 local side = Site::is_local_addr(key$host) ? "local" : "remote";
 local dur = duration_to_mins_secs(r$end-r$begin);
 local message=fmt("%s scanned at least %d unique hosts on port %s in %s", key$host, r$unique, key$str, dur);
 NOTICE([$note=Address_Scan,
 $src=key$host,
 $p=to_port(key$str),
 $sub=side,
 $msg=message,
 $identifier=cat(key$host)]);
 }]);

 # Note: port scans are tracked similar to: table[src_ip, dst_ip] of set(port);
-local r2: Measurement::Reducer = [$stream="scan.port.fail", $apply=set(Measurement::UNIQUE)];
+local r2: SumStats::Reducer = [$stream="scan.port.fail", $apply=set(SumStats::UNIQUE)];
-Measurement::create([$epoch=port_scan_interval,
+SumStats::create([$epoch=port_scan_interval,
 $reducers=set(r2),
-$threshold_val(key: Measurement::Key, result: Measurement::Result) =
+$threshold_val(key: SumStats::Key, result: SumStats::Result) =
 {
 return double_to_count(result["scan.port.fail"]$unique);
 },
 $threshold=port_scan_threshold,
-$threshold_crossed(key: Measurement::Key, result: Measurement::Result) =
+$threshold_crossed(key: SumStats::Key, result: SumStats::Result) =
 {
 local r = result["scan.port.fail"];
 local side = Site::is_local_addr(key$host) ? "local" : "remote";
 local dur = duration_to_mins_secs(r$end-r$begin);
 local message = fmt("%s scanned at least %d unique ports of host %s in %s", key$host, r$unique, key$str, dur);
 NOTICE([$note=Port_Scan,
 $src=key$host,
 $dst=to_addr(key$str),
 $sub=side,
 $msg=message,
 $identifier=cat(key$host)]);
 }]);
 }

-function add_metrics(id: conn_id, reverse: bool)
+function add_sumstats(id: conn_id, reverse: bool)
 {
 local scanner = id$orig_h;
 local victim = id$resp_h;

@@ -150,10 +150,10 @@ function add_metrics(id: conn_id, reverse: bool)
 # return F;

 if ( hook Scan::addr_scan_policy(scanner, victim, scanned_port) )
-Measurement::add_data("scan.addr.fail", [$host=scanner, $str=cat(scanned_port)], [$str=cat(victim)]);
+SumStats::observe("scan.addr.fail", [$host=scanner, $str=cat(scanned_port)], [$str=cat(victim)]);

 if ( hook Scan::port_scan_policy(scanner, victim, scanned_port) )
-Measurement::add_data("scan.port.fail", [$host=scanner, $str=cat(victim)], [$str=cat(scanned_port)]);
+SumStats::observe("scan.port.fail", [$host=scanner, $str=cat(victim)], [$str=cat(scanned_port)]);
 }

 function is_failed_conn(c: connection): bool

@@ -193,7 +193,7 @@ event connection_attempt(c: connection)
 if ( "H" in c$history )
 is_reverse_scan = T;

-add_metrics(c$id, is_reverse_scan);
+add_sumstats(c$id, is_reverse_scan);
 }

 ## Generated for a rejected TCP connection. This event

@@ -206,7 +206,7 @@ event connection_rejected(c: connection)
 if ( "s" in c$history )
 is_reverse_scan = T;

-add_metrics(c$id, is_reverse_scan);
+add_sumstats(c$id, is_reverse_scan);
 }

 ## Generated when an endpoint aborted a TCP connection.

@@ -215,16 +215,16 @@ event connection_rejected(c: connection)
 event connection_reset(c: connection)
 {
 if ( is_failed_conn(c) )
-add_metrics(c$id, F);
+add_sumstats(c$id, F);
 else if ( is_reverse_failed_conn(c) )
-add_metrics(c$id, T);
+add_sumstats(c$id, T);
 }

 ## Generated for each still-open connection when Bro terminates.
 event connection_pending(c: connection)
 {
 if ( is_failed_conn(c) )
-add_metrics(c$id, F);
+add_sumstats(c$id, F);
 else if ( is_reverse_failed_conn(c) )
-add_metrics(c$id, T);
+add_sumstats(c$id, T);
 }
@@ -1,6 +1,6 @@

 @load base/protocols/ftp
-@load base/frameworks/measurement
+@load base/frameworks/sumstats

 @load base/utils/time

@@ -25,25 +25,25 @@ export {

 event bro_init()
 {
-local r1: Measurement::Reducer = [$stream="ftp.failed_auth", $apply=set(Measurement::UNIQUE)];
+local r1: SumStats::Reducer = [$stream="ftp.failed_auth", $apply=set(SumStats::UNIQUE)];
-Measurement::create([$epoch=bruteforce_measurement_interval,
+SumStats::create([$epoch=bruteforce_measurement_interval,
 $reducers=set(r1),
-$threshold_val(key: Measurement::Key, result: Measurement::Result) =
+$threshold_val(key: SumStats::Key, result: SumStats::Result) =
 {
 return result["ftp.failed_auth"]$num;
 },
 $threshold=bruteforce_threshold,
-$threshold_crossed(key: Measurement::Key, result: Measurement::Result) =
+$threshold_crossed(key: SumStats::Key, result: SumStats::Result) =
 {
 local r = result["ftp.failed_auth"];
 local dur = duration_to_mins_secs(r$end-r$begin);
 local plural = r$unique>1 ? "s" : "";
 local message = fmt("%s had %d failed logins on %d FTP server%s in %s", key$host, r$num, r$unique, plural, dur);
 NOTICE([$note=FTP::Bruteforcing,
 $src=key$host,
 $msg=message,
 $identifier=cat(key$host)]);
 }]);
 }

 event ftp_reply(c: connection, code: count, msg: string, cont_resp: bool)

@@ -52,6 +52,6 @@ event ftp_reply(c: connection, code: count, msg: string, cont_resp: bool)
 if ( cmd == "USER" || cmd == "PASS" )
 {
 if ( FTP::parse_ftp_reply_code(code)$x == 5 )
-Measurement::add_data("ftp.failed_auth", [$host=c$id$orig_h], [$str=cat(c$id$resp_h)]);
+SumStats::observe("ftp.failed_auth", [$host=c$id$orig_h], [$str=cat(c$id$resp_h)]);
 }
 }
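For illustration, a minimal sketch of the threshold shape these detector scripts share after the rename; the stream name ("example.fail"), epoch, and threshold value here are placeholders, not from the commit:

event bro_init()
	{
	local r1: SumStats::Reducer = [$stream="example.fail", $apply=set(SumStats::SUM)];
	SumStats::create([$epoch=10min,
	                  $reducers=set(r1),
	                  # Reduce the epoch's result to the single count that is
	                  # compared against $threshold below.
	                  $threshold_val(key: SumStats::Key, result: SumStats::Result) =
	                  	{
	                  	return double_to_count(result["example.fail"]$sum);
	                  	},
	                  $threshold=20,
	                  # Invoked once per key when that value crosses $threshold.
	                  $threshold_crossed(key: SumStats::Key, result: SumStats::Result) =
	                  	{
	                  	print fmt("%s crossed the example threshold (sum=%.0f)", key$host, result["example.fail"]$sum);
	                  	}]);
	}

event connection_attempt(c: connection)
	{
	# Each observation contributes 1 to the per-host sum.
	SumStats::observe("example.fail", [$host=c$id$orig_h], [$num=1]);
	}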
@@ -1,7 +1,7 @@
 ##! SQL injection attack detection in HTTP.

 @load base/frameworks/notice
-@load base/frameworks/measurement
+@load base/frameworks/sumstats
 @load base/protocols/http

 module HTTP;

@@ -50,7 +50,7 @@
 | /\/\*![[:digit:]]{5}.*?\*\// &redef;
 }

-function format_sqli_samples(samples: vector of Measurement::DataPoint): string
+function format_sqli_samples(samples: vector of SumStats::Observation): string
 {
 local ret = "SQL Injection samples\n---------------------";
 for ( i in samples )

@@ -63,41 +63,41 @@ event bro_init() &priority=3
 # Add filters to the metrics so that the metrics framework knows how to
 # determine when it looks like an actual attack and how to respond when
 # thresholds are crossed.
-local r1: Measurement::Reducer = [$stream="http.sqli.attacker", $apply=set(Measurement::SUM), $samples=collect_SQLi_samples];
+local r1: SumStats::Reducer = [$stream="http.sqli.attacker", $apply=set(SumStats::SUM), $samples=collect_SQLi_samples];
-Measurement::create([$epoch=sqli_requests_interval,
+SumStats::create([$epoch=sqli_requests_interval,
 $reducers=set(r1),
-$threshold_val(key: Measurement::Key, result: Measurement::Result) =
+$threshold_val(key: SumStats::Key, result: SumStats::Result) =
 {
 return double_to_count(result["http.sqli.attacker"]$sum);
 },
 $threshold=sqli_requests_threshold,
-$threshold_crossed(key: Measurement::Key, result: Measurement::Result) =
+$threshold_crossed(key: SumStats::Key, result: SumStats::Result) =
 {
 local r = result["http.sqli.attacker"];
 NOTICE([$note=SQL_Injection_Attacker,
 $msg="An SQL injection attacker was discovered!",
-$email_body_sections=vector(format_sqli_samples(Measurement::get_samples(r))),
+$email_body_sections=vector(format_sqli_samples(SumStats::get_samples(r))),
 $src=key$host,
 $identifier=cat(key$host)]);
 }]);

-local r2: Measurement::Reducer = [$stream="http.sqli.victim", $apply=set(Measurement::SUM), $samples=collect_SQLi_samples];
+local r2: SumStats::Reducer = [$stream="http.sqli.victim", $apply=set(SumStats::SUM), $samples=collect_SQLi_samples];
-Measurement::create([$epoch=sqli_requests_interval,
+SumStats::create([$epoch=sqli_requests_interval,
 $reducers=set(r2),
-$threshold_val(key: Measurement::Key, result: Measurement::Result) =
+$threshold_val(key: SumStats::Key, result: SumStats::Result) =
 {
 return double_to_count(result["http.sqli.victim"]$sum);
 },
 $threshold=sqli_requests_threshold,
-$threshold_crossed(key: Measurement::Key, result: Measurement::Result) =
+$threshold_crossed(key: SumStats::Key, result: SumStats::Result) =
 {
 local r = result["http.sqli.victim"];
 NOTICE([$note=SQL_Injection_Victim,
 $msg="An SQL injection victim was discovered!",
-$email_body_sections=vector(format_sqli_samples(Measurement::get_samples(r))),
+$email_body_sections=vector(format_sqli_samples(SumStats::get_samples(r))),
 $src=key$host,
 $identifier=cat(key$host)]);
 }]);
 }

 event http_request(c: connection, method: string, original_URI: string,

@@ -107,7 +107,7 @@ event http_request(c: connection, method: string, original_URI: string,
 {
 add c$http$tags[URI_SQLI];

-Measurement::add_data("http.sqli.attacker", [$host=c$id$orig_h], [$str=original_URI]);
+SumStats::observe("http.sqli.attacker", [$host=c$id$orig_h], [$str=original_URI]);
-Measurement::add_data("http.sqli.victim", [$host=c$id$resp_h], [$str=original_URI]);
+SumStats::observe("http.sqli.victim", [$host=c$id$resp_h], [$str=original_URI]);
 }
 }
@@ -18,12 +18,12 @@ event bro_init() &priority=5
 {
 Metrics::add_filter("smtp.mailfrom", [$every=breaks,
 $measure=set(Metrics::SUM),
-$pred(index: Metrics::Index, data: Metrics::DataPoint) = {
+$pred(index: Metrics::Index, data: Metrics::Observation) = {
 return addr_matches_host(index$host, LOCAL_HOSTS);
 }]);
 Metrics::add_filter("smtp.messages", [$every=breaks,
 $measure=set(Metrics::SUM),
-$pred(index: Metrics::Index, data: Metrics::DataPoint) = {
+$pred(index: Metrics::Index, data: Metrics::Observation) = {
 return addr_matches_host(index$host, LOCAL_HOSTS);
 }]);
 }
@@ -2,7 +2,7 @@
 ##! bruteforcing over SSH.

 @load base/protocols/ssh
-@load base/frameworks/measurement
+@load base/frameworks/sumstats
 @load base/frameworks/notice
 @load base/frameworks/intel

@@ -42,27 +42,27 @@ export {

 event bro_init()
 {
-local r1: Measurement::Reducer = [$stream="ssh.login.failure", $apply=set(Measurement::SUM)];
+local r1: SumStats::Reducer = [$stream="ssh.login.failure", $apply=set(SumStats::SUM)];
-Measurement::create([$epoch=guessing_timeout,
+SumStats::create([$epoch=guessing_timeout,
 $reducers=set(r1),
-$threshold_val(key: Measurement::Key, result: Measurement::Result) =
+$threshold_val(key: SumStats::Key, result: SumStats::Result) =
 {
 return double_to_count(result["ssh.login.failure"]$sum);
 },
 $threshold=password_guesses_limit,
-$threshold_crossed(key: Measurement::Key, result: Measurement::Result) =
+$threshold_crossed(key: SumStats::Key, result: SumStats::Result) =
 {
 local r = result["ssh.login.failure"];
 # Generate the notice.
 NOTICE([$note=Password_Guessing,
 $msg=fmt("%s appears to be guessing SSH passwords (seen in %d connections).", key$host, r$num),
 $src=key$host,
 $identifier=cat(key$host)]);
 # Insert the guesser into the intel framework.
 Intel::insert([$host=key$host,
 $meta=[$source="local",
 $desc=fmt("Bro observed %d apparently failed SSH connections.", r$num)]]);
 }]);
 }

 event SSH::heuristic_successful_login(c: connection)

@@ -82,5 +82,5 @@ event SSH::heuristic_failed_login(c: connection)
 # be ignored.
 if ( ! (id$orig_h in ignore_guessers &&
 id$resp_h in ignore_guessers[id$orig_h]) )
-Measurement::add_data("ssh.login.failure", [$host=id$orig_h], [$num=1]);
+SumStats::observe("ssh.login.failure", [$host=id$orig_h], [$num=1]);
 }
@@ -1,83 +0,0 @@
-# @TEST-SERIALIZE: comm
-#
-# @TEST-EXEC: btest-bg-run manager-1 BROPATH=$BROPATH:.. CLUSTER_NODE=manager-1 bro %INPUT
-# @TEST-EXEC: sleep 1
-# @TEST-EXEC: btest-bg-run worker-1 BROPATH=$BROPATH:.. CLUSTER_NODE=worker-1 bro %INPUT
-# @TEST-EXEC: btest-bg-run worker-2 BROPATH=$BROPATH:.. CLUSTER_NODE=worker-2 bro %INPUT
-# @TEST-EXEC: btest-bg-wait 15
-
-# @TEST-EXEC: btest-diff manager-1/.stdout
-
-@TEST-START-FILE cluster-layout.bro
-redef Cluster::nodes = {
-["manager-1"] = [$node_type=Cluster::MANAGER, $ip=127.0.0.1, $p=37757/tcp, $workers=set("worker-1", "worker-2")],
-["worker-1"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=37760/tcp, $manager="manager-1", $interface="eth0"],
-["worker-2"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=37761/tcp, $manager="manager-1", $interface="eth1"],
-};
-@TEST-END-FILE
-
-redef Log::default_rotation_interval = 0secs;
-
-global n = 0;
-
-event bro_init() &priority=5
-{
-local r1: Measurement::Reducer = [$stream="test.metric", $apply=set(Measurement::SUM, Measurement::MIN, Measurement::MAX, Measurement::AVERAGE, Measurement::STD_DEV, Measurement::VARIANCE, Measurement::UNIQUE)];
-Measurement::create([$epoch=5secs,
-$reducers=set(r1),
-$epoch_finished(rt: Measurement::ResultTable) =
-{
-for ( key in rt )
-{
-local r = rt[key]["test.metric"];
-print fmt("Host: %s - num:%d - sum:%.1f - avg:%.1f - max:%.1f - min:%.1f - var:%.1f - std_dev:%.1f - unique:%d", key$host, r$num, r$sum, r$average, r$max, r$min, r$variance, r$std_dev, r$unique);
-}
-
-terminate();
-}
-]);
-}
-
-event remote_connection_closed(p: event_peer)
-{
-terminate();
-}
-
-global ready_for_data: event();
-redef Cluster::manager2worker_events += /^ready_for_data$/;
-
-event ready_for_data()
-{
-if ( Cluster::node == "worker-1" )
-{
-Measurement::add_data("test.metric", [$host=1.2.3.4], [$num=34]);
-Measurement::add_data("test.metric", [$host=1.2.3.4], [$num=30]);
-Measurement::add_data("test.metric", [$host=6.5.4.3], [$num=1]);
-Measurement::add_data("test.metric", [$host=7.2.1.5], [$num=54]);
-}
-if ( Cluster::node == "worker-2" )
-{
-Measurement::add_data("test.metric", [$host=1.2.3.4], [$num=75]);
-Measurement::add_data("test.metric", [$host=1.2.3.4], [$num=30]);
-Measurement::add_data("test.metric", [$host=1.2.3.4], [$num=3]);
-Measurement::add_data("test.metric", [$host=1.2.3.4], [$num=57]);
-Measurement::add_data("test.metric", [$host=1.2.3.4], [$num=52]);
-Measurement::add_data("test.metric", [$host=1.2.3.4], [$num=61]);
-Measurement::add_data("test.metric", [$host=1.2.3.4], [$num=95]);
-Measurement::add_data("test.metric", [$host=6.5.4.3], [$num=5]);
-Measurement::add_data("test.metric", [$host=7.2.1.5], [$num=91]);
-Measurement::add_data("test.metric", [$host=10.10.10.10], [$num=5]);
-}
-}
-
-@if ( Cluster::local_node_type() == Cluster::MANAGER )
-
-global peer_count = 0;
-event remote_connection_handshake_done(p: event_peer) &priority=-5
-{
-++peer_count;
-if ( peer_count == 2 )
-event ready_for_data();
-}
-
-@endif
@@ -1,34 +0,0 @@
-# @TEST-EXEC: bro %INPUT
-# @TEST-EXEC: btest-diff .stdout
-
-event bro_init() &priority=5
-{
-local r1: Measurement::Reducer = [$stream="test.metric",
-$apply=set(Measurement::SUM,
-Measurement::VARIANCE,
-Measurement::AVERAGE,
-Measurement::MAX,
-Measurement::MIN,
-Measurement::STD_DEV,
-Measurement::UNIQUE)];
-Measurement::create([$epoch=3secs,
-$reducers=set(r1),
-$epoch_finished(data: Measurement::ResultTable) =
-{
-for ( key in data )
-{
-local r = data[key]["test.metric"];
-print fmt("Host: %s - num:%d - sum:%.1f - var:%.1f - avg:%.1f - max:%.1f - min:%.1f - std_dev:%.1f - unique:%d", key$host, r$num, r$sum, r$variance, r$average, r$max, r$min, r$std_dev, r$unique);
-}
-}
-]);
-
-Measurement::add_data("test.metric", [$host=1.2.3.4], [$num=5]);
-Measurement::add_data("test.metric", [$host=1.2.3.4], [$num=22]);
-Measurement::add_data("test.metric", [$host=1.2.3.4], [$num=94]);
-Measurement::add_data("test.metric", [$host=1.2.3.4], [$num=50]);
-Measurement::add_data("test.metric", [$host=1.2.3.4], [$num=50]);
-
-Measurement::add_data("test.metric", [$host=6.5.4.3], [$num=2]);
-Measurement::add_data("test.metric", [$host=7.2.1.5], [$num=1]);
-}
@@ -1,73 +0,0 @@
-# @TEST-EXEC: bro %INPUT
-# @TEST-EXEC: btest-diff .stdout
-
-redef enum Notice::Type += {
-Test_Notice,
-};
-
-event bro_init() &priority=5
-{
-local r1: Measurement::Reducer = [$stream="test.metric", $apply=set(Measurement::SUM)];
-Measurement::create([$epoch=3secs,
-$reducers=set(r1),
-#$threshold_val = Measurement::sum_threshold("test.metric"),
-$threshold_val(key: Measurement::Key, result: Measurement::Result) =
-{
-return double_to_count(result["test.metric"]$sum);
-},
-$threshold=5,
-$threshold_crossed(key: Measurement::Key, result: Measurement::Result) =
-{
-local r = result["test.metric"];
-print fmt("THRESHOLD: hit a threshold value at %.0f for %s", r$sum, Measurement::key2str(key));
-}
-]);
-
-local r2: Measurement::Reducer = [$stream="test.metric", $apply=set(Measurement::SUM)];
-Measurement::create([$epoch=3secs,
-$reducers=set(r2),
-#$threshold_val = Measurement::sum_threshold("test.metric"),
-$threshold_val(key: Measurement::Key, result: Measurement::Result) =
-{
-return double_to_count(result["test.metric"]$sum);
-},
-$threshold_series=vector(3,6,800),
-$threshold_crossed(key: Measurement::Key, result: Measurement::Result) =
-{
-local r = result["test.metric"];
-print fmt("THRESHOLD_SERIES: hit a threshold series value at %.0f for %s", r$sum, Measurement::key2str(key));
-}
-]);
-
-local r3: Measurement::Reducer = [$stream="test.metric", $apply=set(Measurement::SUM)];
-local r4: Measurement::Reducer = [$stream="test.metric2", $apply=set(Measurement::SUM)];
-Measurement::create([$epoch=3secs,
-$reducers=set(r3, r4),
-$threshold_val(key: Measurement::Key, result: Measurement::Result) =
-{
-# Calculate a ratio between sums of two reducers.
-if ( "test.metric2" in result && "test.metric" in result &&
-result["test.metric"]$sum > 0 )
-return double_to_count(result["test.metric2"]$sum / result["test.metric"]$sum);
-else
-return 0;
-},
-# Looking for metric2 sum to be 5 times the sum of metric
-$threshold=5,
-$threshold_crossed(key: Measurement::Key, result: Measurement::Result) =
-{
-local thold = result["test.metric2"]$sum / result["test.metric"]$sum;
-print fmt("THRESHOLD WITH RATIO BETWEEN REDUCERS: hit a threshold value at %.0fx for %s", thold, Measurement::key2str(key));
-}
-]);
-
-Measurement::add_data("test.metric", [$host=1.2.3.4], [$num=3]);
-Measurement::add_data("test.metric", [$host=6.5.4.3], [$num=2]);
-Measurement::add_data("test.metric", [$host=7.2.1.5], [$num=1]);
-Measurement::add_data("test.metric", [$host=1.2.3.4], [$num=3]);
-Measurement::add_data("test.metric", [$host=7.2.1.5], [$num=1000]);
-Measurement::add_data("test.metric2", [$host=7.2.1.5], [$num=10]);
-Measurement::add_data("test.metric2", [$host=7.2.1.5], [$num=1000]);
-Measurement::add_data("test.metric2", [$host=7.2.1.5], [$num=54321]);
-
-}
@@ -0,0 +1,82 @@
+# @TEST-SERIALIZE: comm
+#
+# @TEST-EXEC: btest-bg-run manager-1 BROPATH=$BROPATH:.. CLUSTER_NODE=manager-1 bro %INPUT
+# @TEST-EXEC: sleep 1
+# @TEST-EXEC: btest-bg-run worker-1 BROPATH=$BROPATH:.. CLUSTER_NODE=worker-1 bro %INPUT
+# @TEST-EXEC: btest-bg-run worker-2 BROPATH=$BROPATH:.. CLUSTER_NODE=worker-2 bro %INPUT
+# @TEST-EXEC: btest-bg-wait 15
+
+# @TEST-EXEC: btest-diff manager-1/.stdout
+
+@TEST-START-FILE cluster-layout.bro
+redef Cluster::nodes = {
+["manager-1"] = [$node_type=Cluster::MANAGER, $ip=127.0.0.1, $p=37757/tcp, $workers=set("worker-1", "worker-2")],
+["worker-1"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=37760/tcp, $manager="manager-1", $interface="eth0"],
+["worker-2"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=37761/tcp, $manager="manager-1", $interface="eth1"],
+};
+@TEST-END-FILE
+
+redef Log::default_rotation_interval = 0secs;
+
+global n = 0;
+
+event bro_init() &priority=5
+{
+local r1: SumStats::Reducer = [$stream="test.metric", $apply=set(SumStats::SUM, SumStats::MIN, SumStats::MAX, SumStats::AVERAGE, SumStats::STD_DEV, SumStats::VARIANCE, SumStats::UNIQUE)];
+SumStats::create([$epoch=5secs,
+$reducers=set(r1),
+$epoch_finished(rt: SumStats::ResultTable) =
+{
+for ( key in rt )
+{
+local r = rt[key]["test.metric"];
+print fmt("Host: %s - num:%d - sum:%.1f - avg:%.1f - max:%.1f - min:%.1f - var:%.1f - std_dev:%.1f - unique:%d", key$host, r$num, r$sum, r$average, r$max, r$min, r$variance, r$std_dev, r$unique);
+}
+
+terminate();
+}]);
+}
+
+event remote_connection_closed(p: event_peer)
+{
+terminate();
+}
+
+global ready_for_data: event();
+redef Cluster::manager2worker_events += /^ready_for_data$/;
+
+event ready_for_data()
+{
+if ( Cluster::node == "worker-1" )
+{
+SumStats::observe("test.metric", [$host=1.2.3.4], [$num=34]);
+SumStats::observe("test.metric", [$host=1.2.3.4], [$num=30]);
+SumStats::observe("test.metric", [$host=6.5.4.3], [$num=1]);
+SumStats::observe("test.metric", [$host=7.2.1.5], [$num=54]);
+}
+if ( Cluster::node == "worker-2" )
+{
+SumStats::observe("test.metric", [$host=1.2.3.4], [$num=75]);
+SumStats::observe("test.metric", [$host=1.2.3.4], [$num=30]);
+SumStats::observe("test.metric", [$host=1.2.3.4], [$num=3]);
+SumStats::observe("test.metric", [$host=1.2.3.4], [$num=57]);
+SumStats::observe("test.metric", [$host=1.2.3.4], [$num=52]);
+SumStats::observe("test.metric", [$host=1.2.3.4], [$num=61]);
+SumStats::observe("test.metric", [$host=1.2.3.4], [$num=95]);
+SumStats::observe("test.metric", [$host=6.5.4.3], [$num=5]);
+SumStats::observe("test.metric", [$host=7.2.1.5], [$num=91]);
+SumStats::observe("test.metric", [$host=10.10.10.10], [$num=5]);
+}
+}
+
+@if ( Cluster::local_node_type() == Cluster::MANAGER )
+
+global peer_count = 0;
+event remote_connection_handshake_done(p: event_peer) &priority=-5
+{
+++peer_count;
+if ( peer_count == 2 )
+event ready_for_data();
+}
+
+@endif
testing/btest/scripts/base/frameworks/sumstats/basic.bro (new file, 34 lines)
@@ -0,0 +1,34 @@
+# @TEST-EXEC: bro %INPUT
+# @TEST-EXEC: btest-diff .stdout
+
+event bro_init() &priority=5
+{
+local r1: SumStats::Reducer = [$stream="test.metric",
+$apply=set(SumStats::SUM,
+SumStats::VARIANCE,
+SumStats::AVERAGE,
+SumStats::MAX,
+SumStats::MIN,
+SumStats::STD_DEV,
+SumStats::UNIQUE)];
+SumStats::create([$epoch=3secs,
+$reducers=set(r1),
+$epoch_finished(data: SumStats::ResultTable) =
+{
+for ( key in data )
+{
+local r = data[key]["test.metric"];
+print fmt("Host: %s - num:%d - sum:%.1f - var:%.1f - avg:%.1f - max:%.1f - min:%.1f - std_dev:%.1f - unique:%d", key$host, r$num, r$sum, r$variance, r$average, r$max, r$min, r$std_dev, r$unique);
+}
+}
+]);
+
+SumStats::observe("test.metric", [$host=1.2.3.4], [$num=5]);
+SumStats::observe("test.metric", [$host=1.2.3.4], [$num=22]);
+SumStats::observe("test.metric", [$host=1.2.3.4], [$num=94]);
+SumStats::observe("test.metric", [$host=1.2.3.4], [$num=50]);
+SumStats::observe("test.metric", [$host=1.2.3.4], [$num=50]);
+
+SumStats::observe("test.metric", [$host=6.5.4.3], [$num=2]);
+SumStats::observe("test.metric", [$host=7.2.1.5], [$num=1]);
+}
@@ -19,20 +19,19 @@ redef Log::default_rotation_interval = 0secs;

 event bro_init() &priority=5
 {
-local r1: Measurement::Reducer = [$stream="test.metric", $apply=set(Measurement::SUM)];
+local r1: SumStats::Reducer = [$stream="test.metric", $apply=set(SumStats::SUM)];
-Measurement::create([$epoch=1hr,
+SumStats::create([$epoch=1hr,
 $reducers=set(r1),
-$threshold_val(key: Measurement::Key, result: Measurement::Result) =
+$threshold_val(key: SumStats::Key, result: SumStats::Result) =
 {
 return double_to_count(result["test.metric"]$sum);
 },
 $threshold=100,
-$threshold_crossed(key: Measurement::Key, result: Measurement::Result) =
+$threshold_crossed(key: SumStats::Key, result: SumStats::Result) =
 {
 print fmt("A test metric threshold was crossed with a value of: %.1f", result["test.metric"]$sum);
 terminate();
-}
+}]);
-]);
 }

 event remote_connection_closed(p: event_peer)

@@ -40,12 +39,12 @@ event remote_connection_closed(p: event_peer)
 terminate();
 }

-event do_metrics(i: count)
+event do_stats(i: count)
 {
 # Worker-1 will trigger an intermediate update and then if everything
 # works correctly, the data from worker-2 will hit the threshold and
 # should trigger the notice.
-Measurement::add_data("test.metric", [$host=1.2.3.4], [$num=i]);
+SumStats::observe("test.metric", [$host=1.2.3.4], [$num=i]);
 }

 event remote_connection_handshake_done(p: event_peer)

@@ -53,8 +52,8 @@ event remote_connection_handshake_done(p: event_peer)
 if ( p$descr == "manager-1" )
 {
 if ( Cluster::node == "worker-1" )
-schedule 0.1sec { do_metrics(1) };
+schedule 0.1sec { do_stats(1) };
 if ( Cluster::node == "worker-2" )
-schedule 0.5sec { do_metrics(99) };
+schedule 0.5sec { do_stats(99) };
 }
 }
@@ -0,0 +1,73 @@
+# @TEST-EXEC: bro %INPUT
+# @TEST-EXEC: btest-diff .stdout
+
+redef enum Notice::Type += {
+Test_Notice,
+};
+
+event bro_init() &priority=5
+{
+local r1: SumStats::Reducer = [$stream="test.metric", $apply=set(SumStats::SUM)];
+SumStats::create([$epoch=3secs,
+$reducers=set(r1),
+#$threshold_val = SumStats::sum_threshold("test.metric"),
+$threshold_val(key: SumStats::Key, result: SumStats::Result) =
+{
+return double_to_count(result["test.metric"]$sum);
+},
+$threshold=5,
+$threshold_crossed(key: SumStats::Key, result: SumStats::Result) =
+{
+local r = result["test.metric"];
+print fmt("THRESHOLD: hit a threshold value at %.0f for %s", r$sum, SumStats::key2str(key));
+}
+]);
+
+local r2: SumStats::Reducer = [$stream="test.metric", $apply=set(SumStats::SUM)];
+SumStats::create([$epoch=3secs,
+$reducers=set(r2),
+#$threshold_val = SumStats::sum_threshold("test.metric"),
+$threshold_val(key: SumStats::Key, result: SumStats::Result) =
+{
+return double_to_count(result["test.metric"]$sum);
+},
+$threshold_series=vector(3,6,800),
+$threshold_crossed(key: SumStats::Key, result: SumStats::Result) =
+{
+local r = result["test.metric"];
+print fmt("THRESHOLD_SERIES: hit a threshold series value at %.0f for %s", r$sum, SumStats::key2str(key));
+}
+]);
+
+local r3: SumStats::Reducer = [$stream="test.metric", $apply=set(SumStats::SUM)];
+local r4: SumStats::Reducer = [$stream="test.metric2", $apply=set(SumStats::SUM)];
+SumStats::create([$epoch=3secs,
+$reducers=set(r3, r4),
+$threshold_val(key: SumStats::Key, result: SumStats::Result) =
+{
+# Calculate a ratio between sums of two reducers.
+if ( "test.metric2" in result && "test.metric" in result &&
+result["test.metric"]$sum > 0 )
+return double_to_count(result["test.metric2"]$sum / result["test.metric"]$sum);
+else
+return 0;
+},
+# Looking for metric2 sum to be 5 times the sum of metric
+$threshold=5,
+$threshold_crossed(key: SumStats::Key, result: SumStats::Result) =
+{
+local thold = result["test.metric2"]$sum / result["test.metric"]$sum;
+print fmt("THRESHOLD WITH RATIO BETWEEN REDUCERS: hit a threshold value at %.0fx for %s", thold, SumStats::key2str(key));
+}
+]);
+
+SumStats::observe("test.metric", [$host=1.2.3.4], [$num=3]);
+SumStats::observe("test.metric", [$host=6.5.4.3], [$num=2]);
+SumStats::observe("test.metric", [$host=7.2.1.5], [$num=1]);
+SumStats::observe("test.metric", [$host=1.2.3.4], [$num=3]);
+SumStats::observe("test.metric", [$host=7.2.1.5], [$num=1000]);
+SumStats::observe("test.metric2", [$host=7.2.1.5], [$num=10]);
+SumStats::observe("test.metric2", [$host=7.2.1.5], [$num=1000]);
+SumStats::observe("test.metric2", [$host=7.2.1.5], [$num=54321]);
+
+}
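For reference on the ratio reducer in this new test: host 7.2.1.5 ends the epoch with a test.metric sum of 1 + 1000 = 1001 and a test.metric2 sum of 10 + 1000 + 54321 = 55331, so $threshold_val returns double_to_count(55331 / 1001), roughly 55, which crosses the threshold of 5; host 1.2.3.4 has no test.metric2 observations, so the callback returns 0 for it and never crosses.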