Mirror of https://github.com/zeek/zeek.git (synced 2025-10-03 07:08:19 +00:00)

commit 3e74cdc6e0
Merge remote-tracking branch 'origin/master' into topic/bernhard/hyperloglog

37 changed files with 475 additions and 344 deletions
CHANGES (34 added lines)

@@ -1,4 +1,38 @@
+2.1-498 | 2013-05-03 17:44:08 -0700
+
+  * Table lookups return copy of non-const &default vals. This
+    prevents unintentional modifications to the &default value itself.
+    Addresses #981. (Jon Siwek)
+
+2.1-496 | 2013-05-03 15:54:47 -0700
+
+  * Fix memory leak and unnecessary allocations in OpaqueVal.
+    Addresses #986. (Matthias Vallentin)
+
+2.1-492 | 2013-05-02 12:46:26 -0700
+
+  * Work-around for sumstats framework not propagating updates after
+    intermediate check in cluster environments. (Bernhard Amann)
+
+  * Always apply tcp_connection_attempt. Before this change it was
+    only applied when a connection_attempt() event handler was
+    defined. (Robin Sommer)
+
+  * Fixing coverage.bare-mode-errors test. (Robin Sommer)
+
+2.1-487 | 2013-05-01 18:03:22 -0700
+
+  * Always apply tcp_connection_attempt timer, even if no
+    connection_attempt() event handler is defined. (Robin Sommer)
+
+2.1-486 | 2013-05-01 15:28:45 -0700
+
+  * New framework for computing summary statistics in
+    base/frameworks/sumstats. This replaces the metrics framework, and
+    comes with a number of applications built on top; see NEWS. More
+    documentation to follow. (Seth Hall)
+
 2.1-397 | 2013-04-29 21:19:00 -0700
 
 * Fixing memory leaks in CompHash implementation. Addresses #987.
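The &default fix in 2.1-498 is easiest to see with a small sketch (illustrative
only, not part of this commit; the table and values are made up). Before the
fix, a lookup that fell through to a non-const &default handed back the default
value itself, so mutating the result silently changed the default that every
later lookup would see:

# Sketch of the behavior addressed by #981.
global seen: table[addr] of set[string] &default=set();

event bro_init()
	{
	local s = seen[1.2.3.4];   # miss: returns the &default (now a copy of it)
	add s["x"];                # with the fix, the shared default stays empty
	print |seen[5.6.7.8]|;     # prints 0: a fresh empty set, not {"x"}
	}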
NEWS (24 added lines)

@@ -126,6 +126,9 @@ Changed Functionality
 - Removed the byte_len() and length() bif functions. Use the "|...|"
   operator instead.
 
+- The SSH::Login notice has been superseded by a corresponding
+  intelligence framework observation (SSH::SUCCESSFUL_LOGIN).
+
 Bro 2.1
 -------
 

@@ -209,6 +212,27 @@ New Functionality
   outputs. We do not yet recommend them for production (but welcome
   feedback!)
 
+- Summary statistics framework. [Extend]
+
+- A number of new applications built on top of the summary statistics
+  framework:
+
+  * Scan detection: Detectors for port and address scans return. See
+    policy/misc/scan.bro.
+
+  * Traceroute detector: policy/misc/detect-traceroute
+
+  * Web application detection/measurement: policy/misc/app-metrics.bro
+
+  * FTP brute-forcing detector: policy/protocols/ftp/detect-bruteforcing.bro
+
+  * HTTP-based SQL injection detector: policy/protocols/http/detect-sqli.bro
+    (existed before, but now ported to the new framework)
+
+  * SSH brute-forcing detector feeding the intelligence framework:
+    policy/protocols/ssh/detect-bruteforcing.bro
+
+
 Changed Functionality
 ~~~~~~~~~~~~~~~~~~~~~
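All of the applications above follow the same pattern: attach one or more
reducers to an observation stream, feed the stream from event handlers, and
then either inspect results when the epoch ends or react to thresholds. A
minimal sketch of that pattern (the stream name, key, and epoch are
illustrative; the API is the one exported by base/frameworks/sumstats below):

@load base/frameworks/sumstats

event bro_init()
	{
	local r1: SumStats::Reducer = [$stream="conn.attempts", $apply=set(SumStats::SUM)];
	SumStats::create([$epoch=5min,
	                  $reducers=set(r1),
	                  $epoch_finished(rt: SumStats::ResultTable) =
	                  	{
	                  	for ( key in rt )
	                  		print fmt("%s made %.0f attempts", key$host, rt[key]["conn.attempts"]$sum);
	                  	}]);
	}

event connection_attempt(c: connection)
	{
	# Each attempt adds one observation, keyed by the originating host.
	SumStats::observe("conn.attempts", [$host=c$id$orig_h], [$num=1]);
	}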
VERSION (1 changed line)

@@ -1 +1 @@
-2.1-397
+2.1-498
scripts/base/frameworks/sumstats/cluster.bro

@@ -10,49 +10,48 @@
 module SumStats;
 
 export {
-	## Allows a user to decide how large of result groups the
-	## workers should transmit values for cluster stats aggregation.
+	## Allows a user to decide how large of result groups the workers should transmit
+	## values for cluster stats aggregation.
 	const cluster_send_in_groups_of = 50 &redef;
 
-	## The percent of the full threshold value that needs to be met
-	## on a single worker for that worker to send the value to its manager in
-	## order for it to request a global view for that value. There is no
-	## requirement that the manager requests a global view for the key
-	## since it may opt not to if it requested a global view for the key
-	## recently.
+	## The percent of the full threshold value that needs to be met on a single worker
+	## for that worker to send the value to its manager in order for it to request a
+	## global view for that value. There is no requirement that the manager requests
+	## a global view for the key since it may opt not to if it requested a global view
+	## for the key recently.
 	const cluster_request_global_view_percent = 0.2 &redef;
 
 	## This is to deal with intermediate update overload. A manager will only allow
-	## this many intermediate update requests to the workers to be inflight at
-	## any given time. Requested intermediate updates are currently thrown out
-	## and not performed. In practice this should hopefully have a minimal effect.
+	## this many intermediate update requests to the workers to be inflight at any
+	## given time. Requested intermediate updates are currently thrown out and not
+	## performed. In practice this should hopefully have a minimal effect.
 	const max_outstanding_global_views = 10 &redef;
 
-	## Intermediate updates can cause overload situations on very large clusters.
-	## This option may help reduce load and correct intermittent problems.
-	## The goal for this option is also meant to be temporary.
+	## Intermediate updates can cause overload situations on very large clusters. This
+	## option may help reduce load and correct intermittent problems. The goal for this
+	## option is also meant to be temporary.
 	const enable_intermediate_updates = T &redef;
 
-	## Event sent by the manager in a cluster to initiate the
-	## collection of values for a sumstat.
+	## Event sent by the manager in a cluster to initiate the collection of values for
+	## a sumstat.
 	global cluster_ss_request: event(uid: string, ssid: string);
 
-	## Event sent by nodes that are collecting sumstats after receiving
-	## a request for the sumstat from the manager.
+	## Event sent by nodes that are collecting sumstats after receiving a request for
+	## the sumstat from the manager.
 	global cluster_ss_response: event(uid: string, ssid: string, data: ResultTable, done: bool);
 
-	## This event is sent by the manager in a cluster to initiate the
-	## collection of a single key value from a sumstat. It's typically
-	## used to get intermediate updates before the break interval triggers
-	## to speed detection of a value crossing a threshold.
+	## This event is sent by the manager in a cluster to initiate the collection of
+	## a single key value from a sumstat. It's typically used to get intermediate
+	## updates before the break interval triggers to speed detection of a value
+	## crossing a threshold.
 	global cluster_key_request: event(uid: string, ssid: string, key: Key);
 
 	## This event is sent by nodes in response to a
 	## :bro:id:`SumStats::cluster_key_request` event.
 	global cluster_key_response: event(uid: string, ssid: string, key: Key, result: Result);
 
-	## This is sent by workers to indicate that they crossed the percent of the
-	## current threshold by the percentage defined globally in
+	## This is sent by workers to indicate that they crossed the percent
+	## of the current threshold by the percentage defined globally in
 	## :bro:id:`SumStats::cluster_request_global_view_percent`
 	global cluster_key_intermediate_response: event(ssid: string, key: SumStats::Key);

@@ -69,7 +68,7 @@ redef Cluster::manager2worker_events += /SumStats::thresholds_reset/;
 redef Cluster::worker2manager_events += /SumStats::cluster_(ss_response|key_response|key_intermediate_response)/;
 
 @if ( Cluster::local_node_type() != Cluster::MANAGER )
 # This variable is maintained to know what keys have recently sent as
 # intermediate updates so they don't overwhelm their manager. The count that is
 # yielded is the number of times the percentage threshold has been crossed and
 # an intermediate result has been received.

@@ -82,7 +81,7 @@ event bro_init() &priority=-100
 	reducer_store = table();
 	}
 
 # This is done on all non-manager node types in the event that a sumstat is
 # being collected somewhere other than a worker.
 function data_added(ss: SumStat, key: Key, result: Result)
 	{

@@ -92,9 +91,9 @@ function data_added(ss: SumStat, key: Key, result: Result)
 		return;
 
 	# If val is 5 and global view % is 0.1 (10%), pct_val will be 50. If that
 	# crosses the full threshold then it's a candidate to send as an
 	# intermediate update.
 	if ( enable_intermediate_updates &&
 	     check_thresholds(ss, key, result, cluster_request_global_view_percent) )
 		{
 		# kick off intermediate update

@@ -113,19 +112,21 @@ event SumStats::send_data(uid: string, ssid: string, data: ResultTable)
 		{
 		local_data[key] = data[key];
 		delete data[key];
 
 		# Only send cluster_send_in_groups_of at a time. Queue another
 		# event to send the next group.
 		if ( cluster_send_in_groups_of == ++num_added )
 			break;
 		}
 
 	local done = F;
 	# If data is empty, this sumstat is done.
 	if ( |data| == 0 )
 		done = T;
 
-	event SumStats::cluster_ss_response(uid, ssid, local_data, done);
+	# Note: copy is needed to compensate serialization caching issue. This should be
+	# changed to something else later.
+	event SumStats::cluster_ss_response(uid, ssid, copy(local_data), done);
 	if ( ! done )
 		schedule 0.01 sec { SumStats::send_data(uid, ssid, data) };
 	}

@@ -133,7 +134,7 @@ event SumStats::send_data(uid: string, ssid: string, data: ResultTable)
 event SumStats::cluster_ss_request(uid: string, ssid: string)
 	{
 	#print fmt("WORKER %s: received the cluster_ss_request event for %s.", Cluster::node, id);
 
 	# Initiate sending all of the data for the requested stats.
 	if ( ssid in result_store )
 		event SumStats::send_data(uid, ssid, result_store[ssid]);

@@ -145,13 +146,16 @@ event SumStats::cluster_ss_request(uid: string, ssid: string)
 	if ( ssid in stats_store )
 		reset(stats_store[ssid]);
 	}
 
 event SumStats::cluster_key_request(uid: string, ssid: string, key: Key)
 	{
 	if ( ssid in result_store && key in result_store[ssid] )
 		{
 		#print fmt("WORKER %s: received the cluster_key_request event for %s=%s.", Cluster::node, key2str(key), data);
-		event SumStats::cluster_key_response(uid, ssid, key, result_store[ssid][key]);
+
+		# Note: copy is needed to compensate serialization caching issue. This should be
+		# changed to something else later.
+		event SumStats::cluster_key_response(uid, ssid, key, copy(result_store[ssid][key]));
 		}
 	else
 		{

@@ -179,27 +183,27 @@ event SumStats::thresholds_reset(ssid: string)
 
 @if ( Cluster::local_node_type() == Cluster::MANAGER )
 
 # This variable is maintained by manager nodes as they collect and aggregate
 # results.
 # Index on a uid.
 global stats_results: table[string] of ResultTable &read_expire=1min;
 
 # This variable is maintained by manager nodes to track how many "dones" they
 # collected per collection unique id. Once the number of results for a uid
 # matches the number of peer nodes that results should be coming from, the
 # result is written out and deleted from here.
 # Indexed on a uid.
 # TODO: add an &expire_func in case not all results are received.
 global done_with: table[string] of count &read_expire=1min &default=0;
 
 # This variable is maintained by managers to track intermediate responses as
 # they are getting a global view for a certain key.
 # Indexed on a uid.
 global key_requests: table[string] of Result &read_expire=1min;
 
 # This variable is maintained by managers to prevent overwhelming communication due
 # to too many intermediate updates. Each sumstat is tracked separately so that
 # one won't overwhelm and degrade other quieter sumstats.
 # Indexed on a sumstat id.
 global outstanding_global_views: table[string] of count &default=0;

@@ -211,11 +215,11 @@ event SumStats::finish_epoch(ss: SumStat)
 	{
 	#print fmt("%.6f MANAGER: breaking %s sumstat for %s sumstat", network_time(), ss$name, ss$id);
 	local uid = unique_id("");
 
 	if ( uid in stats_results )
 		delete stats_results[uid];
 	stats_results[uid] = table();
 
 	# Request data from peers.
 	event SumStats::cluster_ss_request(uid, ss$id);
 	}

@@ -224,7 +228,7 @@ event SumStats::finish_epoch(ss: SumStat)
 	schedule ss$epoch { SumStats::finish_epoch(ss) };
 	}
 
 # This is unlikely to be called often, but it's here in
 # case there are sumstats being collected by managers.
 function data_added(ss: SumStat, key: Key, result: Result)
 	{

@@ -234,7 +238,7 @@ function data_added(ss: SumStat, key: Key, result: Result)
 		event SumStats::cluster_threshold_crossed(ss$id, key, threshold_tracker[ss$id][key]);
 		}
 	}
 
 event SumStats::cluster_key_response(uid: string, ssid: string, key: Key, result: Result)
 	{
 	#print fmt("%0.6f MANAGER: receiving key data from %s - %s=%s", network_time(), get_event_peer()$descr, key2str(key), result);

@@ -277,7 +281,7 @@ event SumStats::cluster_key_intermediate_response(ssid: string, key: Key)
 	if ( ssid in outstanding_global_views &&
 	     |outstanding_global_views[ssid]| > max_outstanding_global_views )
 		{
 		# Don't do this intermediate update. Perhaps at some point in the future
 		# we will queue and randomly select from these ignored intermediate
 		# update requests.
 		return;

@@ -308,7 +312,7 @@ event SumStats::cluster_ss_response(uid: string, ssid: string, data: ResultTable, done: bool)
 			local_data[key] = data[key];
 
 			# If a stat is done being collected, thresholds for each key
 			# need to be checked so we're doing it here to avoid doubly
 			# iterating over each key.
 			if ( Cluster::worker_count == done_with[uid] )
 				{

@@ -319,7 +323,7 @@ event SumStats::cluster_ss_response(uid: string, ssid: string, data: ResultTable, done: bool)
 			}
 		}
 
 	# If the data has been collected from all peers, we are done and ready to finish.
 	if ( Cluster::worker_count == done_with[uid] )
 		{
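A worked example of the worker-side intermediate-update check may help (the
threshold value is illustrative; the option defaults are the ones above):

# Suppose a SumStat's full threshold is 100 and
# cluster_request_global_view_percent is the default 0.2. check_thresholds()
# divides the watched value by modify_pct, so the worker's call with
# modify_pct = 0.2 effectively tests
#
#     watch / 0.2 >= 100, i.e. watch >= 20
#
# A worker that has locally seen 20% of the global threshold for a key fires
# cluster_key_intermediate_response; the manager (subject to
# max_outstanding_global_views) then collects that key's per-worker values via
# cluster_key_request / cluster_key_response to test the real threshold.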
scripts/base/frameworks/sumstats/main.bro

@@ -1,5 +1,5 @@
 ##! The summary statistics framework provides a way to
 ##! summarize large streams of data into simple reduced
 ##! measurements.
 
 module SumStats;

@@ -10,24 +10,24 @@ export {
 		PLACEHOLDER
 	};
 
 	## Represents a thing which is having summarization
 	## results collected for it.
 	type Key: record {
 		## A non-address related summarization or a sub-key for
 		## an address based summarization. An example might be
 		## successful SSH connections by client IP address
 		## where the client string would be the key value.
 		## Another example might be number of HTTP requests to
 		## a particular value in a Host header. This is an
 		## example of a non-host based metric since multiple
 		## IP addresses could respond for the same Host
 		## header value.
 		str: string &optional;
 
 		## Host is the value to which this metric applies.
 		host: addr &optional;
 	};
 
 	## Represents data being added for a single observation.
 	## Only supply a single field at a time!
 	type Observation: record {

@@ -40,17 +40,17 @@ export {
 	};
 
 	type Reducer: record {
 		## Observation stream identifier for the reducer
 		## to attach to.
 		stream: string;
 
 		## The calculations to perform on the data points.
 		apply: set[Calculation];
 
 		## A predicate so that you can decide per key if you
 		## would like to accept the data being inserted.
 		pred: function(key: SumStats::Key, obs: SumStats::Observation): bool &optional;
 
 		## A function to normalize the key. This can be used to aggregate or
 		## normalize the entire key.
 		normalize_key: function(key: SumStats::Key): Key &optional;

@@ -59,11 +59,11 @@ export {
 	## Value calculated for an observation stream fed into a reducer.
 	## Most of the fields are added by plugins.
 	type ResultVal: record {
 		## The time when the first observation was added to
 		## this result value.
 		begin: time;
 
 		## The time when the last observation was added to
 		## this result value.
 		end: time;
 

@@ -74,55 +74,56 @@ export {
 	## Type to store results for multiple reducers.
 	type Result: table[string] of ResultVal;
 
 	## Type to store a table of sumstats results indexed
 	## by keys.
 	type ResultTable: table[Key] of Result;
 
 	## SumStats represent an aggregation of reducers along with
 	## mechanisms to handle various situations like the epoch ending
 	## or thresholds being crossed.
+	##
 	## It's best to not access any global state outside
 	## of the variables given to the callbacks because there
 	## is no assurance provided as to where the callbacks
 	## will be executed on clusters.
 	type SumStat: record {
 		## The interval at which this filter should be "broken"
 		## and the '$epoch_finished' callback called. The
 		## results are also reset at this time so any threshold
 		## based detection needs to be set to a
 		## value that should be expected to happen within
 		## this epoch.
 		epoch: interval;
 
 		## The reducers for the SumStat
 		reducers: set[Reducer];
 
 		## Provide a function to calculate a value from the
 		## :bro:see:`Result` structure which will be used
 		## for thresholding.
 		## This is required if a $threshold value is given.
 		threshold_val: function(key: SumStats::Key, result: SumStats::Result): count &optional;
 
 		## The threshold value for calling the
 		## $threshold_crossed callback.
 		threshold: count &optional;
 
 		## A series of thresholds for calling the
 		## $threshold_crossed callback.
 		threshold_series: vector of count &optional;
 
 		## A callback that is called when a threshold is crossed.
 		threshold_crossed: function(key: SumStats::Key, result: SumStats::Result) &optional;
 
 		## A callback with the full collection of Results for
 		## this SumStat.
 		epoch_finished: function(rt: SumStats::ResultTable) &optional;
 	};
 
 	## Create a summary statistic.
 	global create: function(ss: SumStats::SumStat);
 
 	## Add data into an observation stream. This should be
 	## called when a script has measured some point value.
 	##
 	## id: The observation stream identifier that the data

@@ -143,13 +144,13 @@ export {
 	};
 
 	## This event is generated when thresholds are reset for a SumStat.
 	##
 	## ssid: SumStats ID that thresholds were reset for.
 	global thresholds_reset: event(ssid: string);
 
 	## Helper function to represent a :bro:type:`SumStats::Key` value as
 	## a simple string.
 	##
 	## key: The metric key that is to be converted into a string.
 	##
 	## Returns: A string representation of the metric key.

@@ -181,16 +182,17 @@ global result_store: table[string] of ResultTable = table();
 # Store of threshold information.
 global thresholds_store: table[string, Key] of bool = table();
 
-# This is called whenever
-# key values are updated and the new val is given as the `val` argument.
-# It's only prototyped here because cluster and non-cluster have separate
-# implementations.
+# This is called whenever key values are updated and the new val is given as the
+# `val` argument. It's only prototyped here because cluster and non-cluster have
+# separate implementations.
 global data_added: function(ss: SumStat, key: Key, result: Result);
 
 # Prototype the hook point for plugins to do calculations.
 global observe_hook: hook(r: Reducer, val: double, data: Observation, rv: ResultVal);
 
 # Prototype the hook point for plugins to initialize any result values.
 global init_resultval_hook: hook(r: Reducer, rv: ResultVal);
 
 # Prototype the hook point for plugins to merge Results.
 global compose_resultvals_hook: hook(result: ResultVal, rv1: ResultVal, rv2: ResultVal);

@@ -252,7 +254,7 @@ function compose_results(r1: Result, r2: Result): Result
 			result[data_id] = r2[data_id];
 			}
 		}
 
 	return result;
 	}
 

@@ -306,25 +308,25 @@ function observe(id: string, key: Key, obs: Observation)
 		if ( r?$normalize_key )
 			key = r$normalize_key(copy(key));
 
 		# If this reducer has a predicate, run the predicate
 		# and skip this key if the predicate return false.
 		if ( r?$pred && ! r$pred(key, obs) )
 			next;
 
 		local ss = stats_store[r$sid];
 
 		# If there is a threshold and no epoch_finished callback
 		# we don't need to continue counting since the data will
 		# never be accessed. This was leading
 		# to some state management issues when measuring
 		# uniqueness.
 		# NOTE: this optimization could need removed in the
 		# future if on demand access is provided to the
 		# SumStats results.
 		if ( ! ss?$epoch_finished &&
 		     r$sid in threshold_tracker &&
 		     key in threshold_tracker[r$sid] &&
 		     ( ss?$threshold &&
 		       threshold_tracker[r$sid][key]$is_threshold_crossed ) ||
 		     ( ss?$threshold_series &&
 		       threshold_tracker[r$sid][key]$threshold_series_index+1 == |ss$threshold_series| ) )

@@ -356,7 +358,7 @@ function observe(id: string, key: Key, obs: Observation)
 		}
 	}
 
 # This function checks if a threshold has been crossed. It is also used as a method to implement
 # mid-break-interval threshold crossing detection for cluster deployments.
 function check_thresholds(ss: SumStat, key: Key, result: Result, modify_pct: double): bool
 	{

@@ -399,7 +401,7 @@ function check_thresholds(ss: SumStat, key: Key, result: Result, modify_pct: double)
 	     |ss$threshold_series| >= tt$threshold_series_index &&
 	     watch >= ss$threshold_series[tt$threshold_series_index] )
 		{
 		# A threshold series was given and the value crossed the next
 		# value in the series.
 		return T;
 		}
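The threshold fields above combine as follows: $threshold_val projects a
Result onto a single count, and $threshold (or each successive entry of
$threshold_series) is compared against it. A hedged sketch (the stream name
and numbers are illustrative):

@load base/frameworks/sumstats

event bro_init()
	{
	local r: SumStats::Reducer = [$stream="ssh.fail", $apply=set(SumStats::SUM)];
	SumStats::create([$epoch=10min,
	                  $reducers=set(r),
	                  $threshold_val(key: SumStats::Key, result: SumStats::Result) =
	                  	{
	                  	# Project the per-key Result onto the count being thresholded.
	                  	return double_to_count(result["ssh.fail"]$sum);
	                  	},
	                  $threshold_series=vector(10, 50, 100),
	                  $threshold_crossed(key: SumStats::Key, result: SumStats::Result) =
	                  	{
	                  	print fmt("%s crossed %.0f failures", key$host, result["ssh.fail"]$sum);
	                  	}]);
	}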
scripts/base/frameworks/sumstats/non-cluster.bro

@@ -15,8 +15,8 @@ event SumStats::finish_epoch(ss: SumStat)
 
 	schedule ss$epoch { SumStats::finish_epoch(ss) };
 	}
 
 
 function data_added(ss: SumStat, key: Key, result: Result)
 	{
 	if ( check_thresholds(ss, key, result, 1.0) )
scripts/base/frameworks/sumstats/plugins/average.bro

@@ -1,9 +1,9 @@
-@load base/frameworks/sumstats
+@load base/frameworks/sumstats/main
 
 module SumStats;
 
 export {
 	redef enum Calculation += {
 		## Calculate the average of the values.
 		AVERAGE
 	};

@@ -33,4 +33,4 @@ hook compose_resultvals_hook(result: ResultVal, rv1: ResultVal, rv2: ResultVal)
 		result$average = rv1$average;
 	else if ( rv2?$average )
 		result$average = rv2$average;
 	}
scripts/base/frameworks/sumstats/plugins/max.bro

@@ -1,9 +1,9 @@
-@load base/frameworks/sumstats
+@load base/frameworks/sumstats/main
 
 module SumStats;
 
 export {
 	redef enum Calculation += {
 		## Find the maximum value.
 		MAX
 	};

@@ -18,7 +18,7 @@ hook observe_hook(r: Reducer, val: double, obs: Observation, rv: ResultVal)
 	{
 	if ( MAX in r$apply )
 		{
 		if ( ! rv?$max )
 			rv$max = val;
 		else if ( val > rv$max )
 			rv$max = val;
scripts/base/frameworks/sumstats/plugins/min.bro

@@ -1,9 +1,9 @@
-@load base/frameworks/sumstats
+@load base/frameworks/sumstats/main
 
 module SumStats;
 
 export {
 	redef enum Calculation += {
 		## Find the minimum value.
 		MIN
 	};

@@ -18,7 +18,7 @@ hook observe_hook(r: Reducer, val: double, obs: Observation, rv: ResultVal)
 	{
 	if ( MIN in r$apply )
 		{
 		if ( ! rv?$min )
 			rv$min = val;
 		else if ( val < rv$min )
 			rv$min = val;

@@ -33,4 +33,4 @@ hook compose_resultvals_hook(result: ResultVal, rv1: ResultVal, rv2: ResultVal)
 		result$min = rv1$min;
 	else if ( rv2?$min )
 		result$min = rv2$min;
 	}
scripts/base/frameworks/sumstats/plugins/sample.bro

@@ -1,4 +1,4 @@
-@load base/frameworks/sumstats
+@load base/frameworks/sumstats/main
 @load base/utils/queue
 
 module SumStats;

@@ -10,10 +10,8 @@ export {
 	};
 
 	redef record ResultVal += {
-		## This is the queue where samples
-		## are maintained. Use the
-		## :bro:see:`SumStats::get_samples` function
-		## to get a vector of the samples.
+		## This is the queue where samples are maintained. Use the
+		## :bro:see:`SumStats::get_samples` function to get a vector of the samples.
 		samples: Queue::Queue &optional;
 	};
 

@@ -48,4 +48,4 @@ hook compose_resultvals_hook(result: ResultVal, rv1: ResultVal, rv2: ResultVal)
 		result$samples = rv1$samples;
 	else if ( rv2?$samples )
 		result$samples = rv2$samples;
 	}
scripts/base/frameworks/sumstats/plugins/std-dev.bro

@@ -1,10 +1,10 @@
+@load base/frameworks/sumstats/main
 @load ./variance
-@load base/frameworks/sumstats
 
 module SumStats;
 
 export {
 	redef enum Calculation += {
 		## Find the standard deviation of the values.
 		STD_DEV
 	};
scripts/base/frameworks/sumstats/plugins/sum.bro

@@ -1,9 +1,9 @@
-@load base/frameworks/sumstats
+@load base/frameworks/sumstats/main
 
 module SumStats;
 
 export {
 	redef enum Calculation += {
 		## Sums the values given. For string values,
 		## this will be the number of strings given.
 		SUM

@@ -48,4 +48,4 @@ hook compose_resultvals_hook(result: ResultVal, rv1: ResultVal, rv2: ResultVal)
 	if ( rv2?$sum )
 		result$sum += rv2$sum;
 	}
 	}
scripts/base/frameworks/sumstats/plugins/unique.bro

@@ -1,9 +1,9 @@
-@load base/frameworks/sumstats
+@load base/frameworks/sumstats/main
 
 module SumStats;
 
 export {
 	redef enum Calculation += {
 		## Calculate the number of unique values.
 		UNIQUE
 	};

@@ -16,8 +16,8 @@ export {
 }
 
 redef record ResultVal += {
 	# Internal use only. This is not meant to be publically available
 	# because we don't want to trust that we can inspect the values
 	# since we will like move to a probalistic data structure in the future.
 	# TODO: in the future this will optionally be a hyperloglog structure
 	unique_vals: set[Observation] &optional;

@@ -27,7 +27,7 @@ hook observe_hook(r: Reducer, val: double, obs: Observation, rv: ResultVal)
 	{
 	if ( UNIQUE in r$apply )
 		{
 		if ( ! rv?$unique_vals )
 			rv$unique_vals=set();
 		add rv$unique_vals[obs];
 		rv$unique = |rv$unique_vals|;

@@ -40,7 +40,7 @@ hook compose_resultvals_hook(result: ResultVal, rv1: ResultVal, rv2: ResultVal)
 	{
 	if ( rv1?$unique_vals )
 		result$unique_vals = rv1$unique_vals;
 
 	if ( rv2?$unique_vals )
 		if ( ! result?$unique_vals )
 			result$unique_vals = rv2$unique_vals;

@@ -50,4 +50,4 @@ hook compose_resultvals_hook(result: ResultVal, rv1: ResultVal, rv2: ResultVal)
 
 	result$unique = |result$unique_vals|;
 	}
 	}
scripts/base/frameworks/sumstats/plugins/variance.bro

@@ -1,10 +1,10 @@
+@load base/frameworks/sumstats/main
 @load ./average
-@load base/frameworks/sumstats
 
 module SumStats;
 
 export {
 	redef enum Calculation += {
 		## Find the variance of the values.
 		VARIANCE
 	};

@@ -66,4 +66,4 @@ hook compose_resultvals_hook(result: ResultVal, rv1: ResultVal, rv2: ResultVal)
 		result$prev_avg = rv2$prev_avg;
 
 	calc_variance(result);
 	}
scripts/base/protocols/ssh/main.bro

@@ -1,7 +1,7 @@
 ##! Base SSH analysis script. The heuristic to blindly determine success or
 ##! failure for SSH connections is implemented here. At this time, it only
 ##! uses the size of the data being returned from the server to make the
 ##! heuristic determination about success of the connection.
 ##! Requires that :bro:id:`use_conn_size_analyzer` is set to T! The heuristic
 ##! is not attempted if the connection size analyzer isn't enabled.
 

@@ -17,7 +17,7 @@ module SSH;
 export {
 	## The SSH protocol logging stream identifier.
 	redef enum Log::ID += { LOG };
 
 	type Info: record {
 		## Time when the SSH connection began.
 		ts: time &log;

@@ -26,9 +26,9 @@ export {
 		## The connection's 4-tuple of endpoint addresses/ports.
 		id: conn_id &log;
 		## Indicates if the login was heuristically guessed to be "success",
 		## "failure", or "undetermined".
 		status: string &log &default="undetermined";
 		## Direction of the connection. If the client was a local host
 		## logging into an external host, this would be OUTBOUND. INBOUND
 		## would be set for the opposite situation.
 		# TODO: handle local-local and remote-remote better.

@@ -38,33 +38,33 @@ export {
 		## Software string from the server.
 		server: string &log &optional;
 		## Amount of data returned from the server. This is currently
 		## the only measure of the success heuristic and it is logged to
 		## assist analysts looking at the logs to make their own determination
 		## about the success on a case-by-case basis.
 		resp_size: count &log &default=0;
 
 		## Indicate if the SSH session is done being watched.
 		done: bool &default=F;
 	};
 
 	## The size in bytes of data sent by the server at which the SSH
 	## connection is presumed to be successful.
 	const authentication_data_size = 4000 &redef;
 
 	## If true, we tell the event engine to not look at further data
 	## packets after the initial SSH handshake. Helps with performance
 	## (especially with large file transfers) but precludes some
 	## kinds of analyses.
 	const skip_processing_after_detection = F &redef;
 
 	## Event that is generated when the heuristic thinks that a login
 	## was successful.
 	global heuristic_successful_login: event(c: connection);
 
 	## Event that is generated when the heuristic thinks that a login
 	## failed.
 	global heuristic_failed_login: event(c: connection);
 
 	## Event that can be handled to access the :bro:type:`SSH::Info`
 	## record as it is sent on to the logging framework.
 	global log_ssh: event(rec: Info);

@@ -102,21 +102,21 @@ function check_ssh_connection(c: connection, done: bool)
 	# If already done watching this connection, just return.
 	if ( c$ssh$done )
 		return;
 
 	if ( done )
 		{
 		# If this connection is done, then we can look to see if
 		# this matches the conditions for a failed login. Failed
 		# logins are only detected at connection state removal.
 
 		if ( # Require originators to have sent at least 50 bytes.
 		     c$orig$size > 50 &&
 		     # Responders must be below 4000 bytes.
 		     c$resp$size < 4000 &&
 		     # Responder must have sent fewer than 40 packets.
 		     c$resp$num_pkts < 40 &&
 		     # If there was a content gap we can't reliably do this heuristic.
 		     c?$conn && c$conn$missed_bytes == 0)# &&
 		     # Only "normal" connections can count.
 		     #c$conn?$conn_state && c$conn$conn_state in valid_states )
 			{

@@ -147,13 +147,13 @@ function check_ssh_connection(c: connection, done: bool)
 
 		# Set the direction for the log.
 		c$ssh$direction = Site::is_local_addr(c$id$orig_h) ? OUTBOUND : INBOUND;
 
 		# Set the "done" flag to prevent the watching event from rescheduling
 		# after detection is done.
 		c$ssh$done=T;
 
 		Log::write(SSH::LOG, c$ssh);
 
 		if ( skip_processing_after_detection )
 			{
 			# Stop watching this connection, we don't care about it anymore.

@@ -186,12 +186,12 @@ event ssh_server_version(c: connection, version: string) &priority=5
 	set_session(c);
 	c$ssh$server = version;
 	}
 
 event ssh_client_version(c: connection, version: string) &priority=5
 	{
 	set_session(c);
 	c$ssh$client = version;
 
 	# The heuristic detection for SSH relies on the ConnSize analyzer.
 	# Don't do the heuristics if it's disabled.
 	if ( use_conn_size_analyzer )
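The two heuristic events exported above are the integration point for
detection scripts; per the NEWS entry, the SSH::Login notice is superseded by
an intelligence observation built on them. A minimal consumer sketch (the
print is illustrative; the shipped policy feeds the intelligence framework
instead):

@load base/protocols/ssh

event SSH::heuristic_successful_login(c: connection)
	{
	# Fires once per connection the size heuristic judges successful.
	print fmt("probable SSH login %s -> %s (resp_size=%d)",
	          c$id$orig_h, c$id$resp_h, c$ssh$resp_size);
	}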
@ -6,7 +6,7 @@ export {
|
||||||
## Settings for initializing the queue.
|
## Settings for initializing the queue.
|
||||||
type Settings: record {
|
type Settings: record {
|
||||||
## If a maximum length is set for the queue
|
## If a maximum length is set for the queue
|
||||||
## it will maintain itself at that
|
## it will maintain itself at that
|
||||||
## maximum length automatically.
|
## maximum length automatically.
|
||||||
max_len: count &optional;
|
max_len: count &optional;
|
||||||
};
|
};
|
||||||
|
@ -15,17 +15,17 @@ export {
|
||||||
type Queue: record {};
|
type Queue: record {};
|
||||||
|
|
||||||
## Initialize a queue record structure.
|
## Initialize a queue record structure.
|
||||||
##
|
##
|
||||||
## s: A :bro:record:`Settings` record configuring the queue.
|
## s: A :bro:record:`Settings` record configuring the queue.
|
||||||
##
|
##
|
||||||
## Returns: An opaque queue record.
|
## Returns: An opaque queue record.
|
||||||
global init: function(s: Settings): Queue;
|
global init: function(s: Settings): Queue;
|
||||||
|
|
||||||
## Put a string onto the beginning of a queue.
|
## Put a string onto the beginning of a queue.
|
||||||
##
|
##
|
||||||
## q: The queue to put the value into.
|
## q: The queue to put the value into.
|
||||||
##
|
##
|
||||||
## val: The value to insert into the queue.
|
## val: The value to insert into the queue.
|
||||||
global put: function(q: Queue, val: any);
|
global put: function(q: Queue, val: any);
|
||||||
|
|
||||||
## Get a string from the end of a queue.
|
## Get a string from the end of a queue.
|
||||||
|
@@ -35,29 +35,29 @@ export {
    ## Returns: The value retrieved from the queue.
    global get: function(q: Queue): any;

    ## Merge two queues together. If any settings are applied
    ## to the queues, the settings from q1 are used for the new
    ## merged queue.
    ##
    ## q1: The first queue. Settings are taken from here.
    ##
    ## q2: The second queue.
    ##
    ## Returns: A new queue from merging the other two together.
    global merge: function(q1: Queue, q2: Queue): Queue;

    ## Get the number of items in a queue.
    ##
    ## q: The queue.
    ##
    ## Returns: The length of the queue.
    global len: function(q: Queue): count;

    ## Get the contents of the queue as a vector.
    ##
    ## q: The queue.
    ##
    ## ret: A vector containing the
    ##      current contents of q as the type of ret.
    global get_vector: function(q: Queue, ret: vector of any);

@@ -130,7 +130,7 @@ function get_vector(q: Queue, ret: vector of any)
    local i = q$bottom;
    local j = 0;
    # Really dumb hack, this is only to provide
    # the iteration for the correct number of
    # values in q$vals.
    for ( ignored_val in q$vals )
        {
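For orientation, here is a minimal usage sketch of the Queue API documented above. It is not part of the diff; the @load path is an assumption, and only functions from the export block above are used.

    # Sketch only: exercises init/put/len/get as documented above.
    # The @load path is assumed; adjust to wherever the queue script lives.
    @load base/utils/queue

    event bro_init()
        {
        local q = Queue::init([$max_len=3]);
        Queue::put(q, "a");
        Queue::put(q, "b");
        print Queue::len(q);   # 2
        print Queue::get(q);   # "a", taken from the end of the queue
        }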
@@ -1,6 +1,6 @@

## Given an interval, returns a minimal human-readable string of the
## form "3m34s" for the minutes and seconds represented by the
## interval.
function duration_to_mins_secs(dur: interval): string
    {
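A quick illustrative call, assuming the helper is in scope once this script is loaded:

    # Illustrative only: per the doc comment above, this prints "3m34s".
    print duration_to_mins_secs(3min + 34secs);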
@@ -36,9 +36,9 @@ event bro_init() &priority=3
    local r1: SumStats::Reducer = [$stream="apps.bytes", $apply=set(SumStats::SUM)];
    local r2: SumStats::Reducer = [$stream="apps.hits", $apply=set(SumStats::UNIQUE)];
    SumStats::create([$epoch=break_interval,
                      $reducers=set(r1, r2),
                      $epoch_finished(data: SumStats::ResultTable) =
                          {
                          local l: Info;
                          l$ts = network_time();
@@ -67,12 +67,12 @@ function add_sumstats(id: conn_id, hostname: string, size: count)
        SumStats::observe("apps.bytes", [$str="facebook"], [$num=size]);
        SumStats::observe("apps.hits", [$str="facebook"], [$str=cat(id$orig_h)]);
        }
    else if ( /\.google\.com$/ in hostname && size > 20 )
        {
        SumStats::observe("apps.bytes", [$str="google"], [$num=size]);
        SumStats::observe("apps.hits", [$str="google"], [$str=cat(id$orig_h)]);
        }
    else if ( /\.nflximg\.com$/ in hostname && size > 200*1024 )
        {
        SumStats::observe("apps.bytes", [$str="netflix"], [$num=size]);
        SumStats::observe("apps.hits", [$str="netflix"], [$str=cat(id$orig_h)]);
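Every branch above follows the same pattern: observe bytes keyed by an app label and observe hits keyed by the originator address. A hedged sketch of that pattern wrapped in a stand-alone helper (the helper name is invented for illustration and is not in the original script):

    # Sketch only: the same observe pattern as the branches above.
    function observe_app(app: string, id: conn_id, size: count)
        {
        SumStats::observe("apps.bytes", [$str=app], [$num=size]);
        SumStats::observe("apps.hits", [$str=app], [$str=cat(id$orig_h)]);
        }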
@@ -1,7 +1,7 @@
-##! This script detects large number of ICMP Time Exceeded messages heading
-##! toward hosts that have sent low TTL packets.
-##! It generates a notice when the number of ICMP Time Exceeded
-##! messages for a source-destination pair exceeds threshold
+##! This script detects a large number of ICMP Time Exceeded messages heading toward
+##! hosts that have sent low TTL packets. It generates a notice when the number of
+##! ICMP Time Exceeded messages for a source-destination pair exceeds a
+##! threshold.
@load base/frameworks/sumstats
@load base/frameworks/signatures
@load-sigs ./detect-low-ttls.sig
@@ -22,10 +22,10 @@ export {
    ## By default this script requires that any host detected running traceroutes
    ## first send low TTL packets (TTL < 10) to the traceroute destination host.
    ## Changing this setting to `F` will relax the detection a bit by
    ## solely relying on ICMP time-exceeded messages to detect traceroute.
    const require_low_ttl_packets = T &redef;

    ## Defines the threshold for ICMP Time Exceeded messages for a src-dst pair.
    ## This threshold only comes into play after a host is found to be
    ## sending low TTL packets.
@@ -39,11 +39,13 @@ export {
    ## The log record for the traceroute log.
    type Info: record {
        ## Timestamp
        ts:    time &log;
        ## Address initiating the traceroute.
        src:   addr &log;
        ## Destination address of the traceroute.
        dst:   addr &log;
+        ## Protocol used for the traceroute.
+        proto: string &log;
    };

    global log_traceroute: event(rec: Traceroute::Info);
@@ -59,7 +61,7 @@ event bro_init() &priority=5
                      $reducers=set(r1, r2),
                      $threshold_val(key: SumStats::Key, result: SumStats::Result) =
                          {
                          # Return a threshold of zero if the host hasn't been
                          # seen sending low TTL packets yet.
                          if ( require_low_ttl_packets && result["traceroute.low_ttl_packet"]$sum == 0 )
                              return 0;
@@ -69,14 +71,15 @@ event bro_init() &priority=5
                      $threshold=icmp_time_exceeded_threshold,
                      $threshold_crossed(key: SumStats::Key, result: SumStats::Result) =
                          {
-                          local parts = split1(key$str, /-/);
+                          local parts = split_n(key$str, /-/, F, 2);
                          local src = to_addr(parts[1]);
                          local dst = to_addr(parts[2]);
-                          Log::write(LOG, [$ts=network_time(), $src=src, $dst=dst]);
+                          local proto = parts[3];
+                          Log::write(LOG, [$ts=network_time(), $src=src, $dst=dst, $proto=proto]);
                          NOTICE([$note=Traceroute::Detected,
-                                  $msg=fmt("%s seems to be running traceroute", src),
-                                  $src=src, $dst=dst,
-                                  $identifier=cat(src)]);
+                                  $msg=fmt("%s seems to be running traceroute using %s", src, proto),
+                                  $src=src,
+                                  $identifier=cat(src,proto)]);
                          }]);
    }

@@ -84,10 +87,12 @@ event bro_init() &priority=5
event signature_match(state: signature_state, msg: string, data: string)
    {
    if ( state$sig_id == /traceroute-detector.*/ )
-        SumStats::observe("traceroute.low_ttl_packet", [$str=cat(state$conn$id$orig_h,"-",state$conn$id$resp_h)], [$num=1]);
+        {
+        SumStats::observe("traceroute.low_ttl_packet", [$str=cat(state$conn$id$orig_h,"-",state$conn$id$resp_h,"-",get_port_transport_proto(state$conn$id$resp_p))], [$num=1]);
+        }
    }

event icmp_time_exceeded(c: connection, icmp: icmp_conn, code: count, context: icmp_context)
    {
-    SumStats::observe("traceroute.time_exceeded", [$str=cat(context$id$orig_h,"-",context$id$resp_h)], [$str=cat(c$id$orig_h)]);
+    SumStats::observe("traceroute.time_exceeded", [$str=cat(context$id$orig_h,"-",context$id$resp_h,"-",get_port_transport_proto(context$id$resp_p))], [$str=cat(c$id$orig_h)]);
    }
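The observation key is now a "src-dst-proto" string assembled with cat() and taken apart again with split_n() in the $threshold_crossed handler. A hedged stand-alone sketch of that round trip (the addresses and port are illustrative):

    # Sketch only: build and parse the new "src-dst-proto" key format.
    event bro_init()
        {
        local key_str = cat(1.2.3.4, "-", 5.6.7.8, "-", get_port_transport_proto(33434/udp));
        local parts = split_n(key_str, /-/, F, 2);   # at most two splits, giving three parts
        print to_addr(parts[1]), to_addr(parts[2]), parts[3];
        }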
@@ -13,36 +13,39 @@ module Scan;

export {
    redef enum Notice::Type += {
-        ## Address scans detect that a host appears to be scanning some number
-        ## of hosts on a single port. This notice is generated when more than
-        ## :bro:id:`addr_scan_threshold` unique hosts are seen over the
-        ## previous :bro:id:`addr_scan_interval` time range.
+        ## Address scans detect that a host appears to be scanning some number of
+        ## destinations on a single port. This notice is generated when more than
+        ## :bro:id:`addr_scan_threshold` unique hosts are seen over the previous
+        ## :bro:id:`addr_scan_interval` time range.
        Address_Scan,
        ## Port scans detect that an attacking host appears to be scanning a
        ## single victim host on several ports. This notice is generated when
        ## an attacking host attempts to connect to :bro:id:`port_scan_threshold`
        ## unique ports on a single host over the previous
        ## :bro:id:`port_scan_interval` time range.
        Port_Scan,
    };

    ## Failed connection attempts are tracked over this time interval for the address
-    ## scan detection. A higher interval will detect slower scanners, but may
-    ## also yield more false positives.
+    ## scan detection. A higher interval will detect slower scanners, but may also
+    ## yield more false positives.
    const addr_scan_interval = 5min &redef;

-    ## Failed connection attempts are tracked over this time interval for the port
-    ## scan detection. A higher interval will detect slower scanners, but may
-    ## also yield more false positives.
+    ## Failed connection attempts are tracked over this time interval for the port scan
+    ## detection. A higher interval will detect slower scanners, but may also yield
+    ## more false positives.
    const port_scan_interval = 5min &redef;

    ## The threshold for the number of unique hosts a scanning host has to have failed
    ## connections with on a single port.
    const addr_scan_threshold = 25 &redef;

    ## The threshold for the number of unique ports a scanning host has to have failed
    ## connections with on a single victim host.
    const port_scan_threshold = 15 &redef;

    ## Custom thresholds based on service for address scan. This is primarily
    ## useful for setting reduced thresholds for specific ports.
    const addr_scan_custom_thresholds: table[port] of count &redef;
@@ -73,14 +76,14 @@ event bro_init() &priority=5
                              $sub=side,
                              $msg=message,
                              $identifier=cat(key$host)]);
                          }]);

    # Note: port scans are tracked similar to: table[src_ip, dst_ip] of set(port);
    local r2: SumStats::Reducer = [$stream="scan.port.fail", $apply=set(SumStats::UNIQUE)];
    SumStats::create([$epoch=port_scan_interval,
                      $reducers=set(r2),
                      $threshold_val(key: SumStats::Key, result: SumStats::Result) =
                          {
                          return double_to_count(result["scan.port.fail"]$unique);
                          },
                      $threshold=port_scan_threshold,
@@ -90,13 +93,13 @@ event bro_init() &priority=5
                          local side = Site::is_local_addr(key$host) ? "local" : "remote";
                          local dur = duration_to_mins_secs(r$end-r$begin);
                          local message = fmt("%s scanned at least %d unique ports of host %s in %s", key$host, r$unique, key$str, dur);
                          NOTICE([$note=Port_Scan,
                                  $src=key$host,
                                  $dst=to_addr(key$str),
                                  $sub=side,
                                  $msg=message,
                                  $identifier=cat(key$host)]);
                          }]);
    }

function add_sumstats(id: conn_id, reverse: bool)
@@ -111,7 +114,7 @@ function add_sumstats(id: conn_id, reverse: bool)
        victim = id$orig_h;
        scanned_port = id$orig_p;
        }

    if ( hook Scan::addr_scan_policy(scanner, victim, scanned_port) )
        SumStats::observe("scan.addr.fail", [$host=scanner, $str=cat(scanned_port)], [$str=cat(victim)]);

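The `if ( hook Scan::addr_scan_policy(...) )` call above only records the observation when no handler vetoes it with break. A hedged sketch of such a handler (the exempted address is an assumption for illustration):

    # Sketch only: exempt a known host, e.g. an internal vulnerability
    # scanner, from address scan tracking. 10.0.0.99 is illustrative.
    hook Scan::addr_scan_policy(scanner: addr, victim: addr, scanned_port: port)
        {
        if ( scanner == 10.0.0.99 )
            break;
        }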
@@ -121,7 +124,7 @@ function add_sumstats(id: conn_id, reverse: bool)

function is_failed_conn(c: connection): bool
    {
    # Sr || ( (hR || ShR) && (data not sent in any direction) )
    if ( (c$orig$state == TCP_SYN_SENT && c$resp$state == TCP_RESET) ||
         (((c$orig$state == TCP_RESET && c$resp$state == TCP_SYN_ACK_SENT) ||
           (c$orig$state == TCP_RESET && c$resp$state == TCP_ESTABLISHED && "S" in c$history )

@@ -134,7 +137,7 @@ function is_failed_conn(c: connection): bool
function is_reverse_failed_conn(c: connection): bool
    {
    # Reverse scan, i.e. the connection destination is the scanner.
    # sR || ( (Hr || sHr) && (data not sent in any direction) )
    if ( (c$resp$state == TCP_SYN_SENT && c$orig$state == TCP_RESET) ||
         (((c$resp$state == TCP_RESET && c$orig$state == TCP_SYN_ACK_SENT) ||
           (c$resp$state == TCP_RESET && c$orig$state == TCP_ESTABLISHED && "s" in c$history )
@@ -144,37 +147,34 @@ function is_reverse_failed_conn(c: connection): bool
    return F;
    }

## Generated for an unsuccessful connection attempt. This
## event is raised when an originator unsuccessfully attempted
## to establish a connection. “Unsuccessful” is defined as at least
-## tcp_attempt_delay seconds having elapsed since the originator
-## first sent a connection establishment packet to the destination
-## without seeing a reply.
+## tcp_attempt_delay seconds having elapsed since the originator first sent a
+## connection establishment packet to the destination without seeing a reply.
event connection_attempt(c: connection)
    {
    local is_reverse_scan = F;
    if ( "H" in c$history )
        is_reverse_scan = T;

    add_sumstats(c$id, is_reverse_scan);
    }

-## Generated for a rejected TCP connection. This event
-## is raised when an originator attempted to setup a TCP
-## connection but the responder replied with a RST packet
+## Generated for a rejected TCP connection. This event is raised when an originator
+## attempted to set up a TCP connection but the responder replied with a RST packet
## denying it.
event connection_rejected(c: connection)
    {
    local is_reverse_scan = F;
    if ( "s" in c$history )
        is_reverse_scan = T;

    add_sumstats(c$id, is_reverse_scan);
    }

-## Generated when an endpoint aborted a TCP connection.
-## The event is raised when one endpoint of an *established*
-## TCP connection aborted by sending a RST packet.
+## Generated when an endpoint aborted a TCP connection. The event is raised when
+## one endpoint of an *established* TCP connection aborted it by sending a RST packet.
event connection_reset(c: connection)
    {
    if ( is_failed_conn(c) )
@@ -1,3 +1,5 @@
+##! FTP brute-forcing detector, triggering when too many rejected usernames or
+##! failed passwords have occurred from a single address.

@load base/protocols/ftp
@load base/frameworks/sumstats
@@ -7,13 +9,13 @@
module FTP;

export {
    redef enum Notice::Type += {
        ## Indicates a host bruteforcing FTP logins by watching for too many
        ## rejected usernames or failed passwords.
        Bruteforcing
    };

    ## How many rejected usernames or passwords are required before being
    ## considered to be bruteforcing.
    const bruteforce_threshold = 20 &redef;

@@ -29,17 +31,17 @@ event bro_init()
    SumStats::create([$epoch=bruteforce_measurement_interval,
                      $reducers=set(r1),
                      $threshold_val(key: SumStats::Key, result: SumStats::Result) =
                          {
                          return result["ftp.failed_auth"]$num;
                          },
                      $threshold=bruteforce_threshold,
                      $threshold_crossed(key: SumStats::Key, result: SumStats::Result) =
                          {
                          local r = result["ftp.failed_auth"];
                          local dur = duration_to_mins_secs(r$end-r$begin);
                          local plural = r$unique>1 ? "s" : "";
                          local message = fmt("%s had %d failed logins on %d FTP server%s in %s", key$host, r$num, r$unique, plural, dur);
                          NOTICE([$note=FTP::Bruteforcing,
                                  $src=key$host,
                                  $msg=message,
                                  $identifier=cat(key$host)]);
@@ -54,4 +56,4 @@ event ftp_reply(c: connection, code: count, msg: string, cont_resp: bool)
        if ( FTP::parse_ftp_reply_code(code)$x == 5 )
            SumStats::observe("ftp.failed_auth", [$host=c$id$orig_h], [$str=cat(c$id$resp_h)]);
        }
    }
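Since the detector's knobs are &redef constants, sites can tune it from local policy without editing the script. A hedged sketch (the values are illustrative, and it assumes bruteforce_measurement_interval is declared &redef like the threshold):

    # Sketch only: tighten the detector for a small site.
    redef FTP::bruteforce_threshold = 10;
    redef FTP::bruteforce_measurement_interval = 15mins;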
@@ -14,22 +14,22 @@ export {
        ## it. This is tracked by IP address as opposed to hostname.
        SQL_Injection_Victim,
    };

    redef enum Tags += {
        ## Indicator of a URI based SQL injection attack.
        URI_SQLI,
        ## Indicator of client body based SQL injection attack. This is
        ## typically the body content of a POST request. Not implemented yet.
        POST_SQLI,
        ## Indicator of a cookie based SQL injection attack. Not implemented yet.
        COOKIE_SQLI,
    };

    ## Defines the threshold that determines if an SQL injection attack
    ## is ongoing based on the number of requests that appear to be SQL
    ## injection attacks.
    const sqli_requests_threshold = 50 &redef;

    ## Interval at which to watch for the
    ## :bro:id:`HTTP::sqli_requests_threshold` variable to be crossed.
    ## At the end of each interval the counter is reset.
@@ -41,7 +41,7 @@ export {
    const collect_SQLi_samples = 5 &redef;

    ## Regular expression used to match URI based SQL injections.
    const match_sql_injection_uri =
        /[\?&][^[:blank:]\x00-\x37\|]+?=[\-[:alnum:]%]+([[:blank:]\x00-\x37]|\/\*.*?\*\/)*['"]?([[:blank:]\x00-\x37]|\/\*.*?\*\/|\)?;)+.*?([hH][aA][vV][iI][nN][gG]|[uU][nN][iI][oO][nN]|[eE][xX][eE][cC]|[sS][eE][lL][eE][cC][tT]|[dD][eE][lL][eE][tT][eE]|[dD][rR][oO][pP]|[dD][eE][cC][lL][aA][rR][eE]|[cC][rR][eE][aA][tT][eE]|[iI][nN][sS][eE][rR][tT])([[:blank:]\x00-\x37]|\/\*.*?\*\/)+/
      | /[\?&][^[:blank:]\x00-\x37\|]+?=[\-0-9%]+([[:blank:]\x00-\x37]|\/\*.*?\*\/)*['"]?([[:blank:]\x00-\x37]|\/\*.*?\*\/|\)?;)+([xX]?[oO][rR]|[nN]?[aA][nN][dD])([[:blank:]\x00-\x37]|\/\*.*?\*\/)+['"]?(([^a-zA-Z&]+)?=|[eE][xX][iI][sS][tT][sS])/
      | /[\?&][^[:blank:]\x00-\x37]+?=[\-0-9%]*([[:blank:]\x00-\x37]|\/\*.*?\*\/)*['"]([[:blank:]\x00-\x37]|\/\*.*?\*\/)*(-|=|\+|\|\|)([[:blank:]\x00-\x37]|\/\*.*?\*\/)*([0-9]|\(?[cC][oO][nN][vV][eE][rR][tT]|[cC][aA][sS][tT])/
@@ -60,18 +60,18 @@ function format_sqli_samples(samples: vector of SumStats::Observation): string

event bro_init() &priority=3
    {
    # Add filters to the metrics so that the metrics framework knows how to
    # determine when it looks like an actual attack and how to respond when
    # thresholds are crossed.
    local r1: SumStats::Reducer = [$stream="http.sqli.attacker", $apply=set(SumStats::SUM), $samples=collect_SQLi_samples];
    SumStats::create([$epoch=sqli_requests_interval,
                      $reducers=set(r1),
                      $threshold_val(key: SumStats::Key, result: SumStats::Result) =
                          {
                          return double_to_count(result["http.sqli.attacker"]$sum);
                          },
                      $threshold=sqli_requests_threshold,
                      $threshold_crossed(key: SumStats::Key, result: SumStats::Result) =
                          {
                          local r = result["http.sqli.attacker"];
                          NOTICE([$note=SQL_Injection_Attacker,
|
||||||
SumStats::create([$epoch=sqli_requests_interval,
|
SumStats::create([$epoch=sqli_requests_interval,
|
||||||
$reducers=set(r2),
|
$reducers=set(r2),
|
||||||
$threshold_val(key: SumStats::Key, result: SumStats::Result) =
|
$threshold_val(key: SumStats::Key, result: SumStats::Result) =
|
||||||
{
|
{
|
||||||
return double_to_count(result["http.sqli.victim"]$sum);
|
return double_to_count(result["http.sqli.victim"]$sum);
|
||||||
},
|
},
|
||||||
$threshold=sqli_requests_threshold,
|
$threshold=sqli_requests_threshold,
|
||||||
$threshold_crossed(key: SumStats::Key, result: SumStats::Result) =
|
$threshold_crossed(key: SumStats::Key, result: SumStats::Result) =
|
||||||
{
|
{
|
||||||
local r = result["http.sqli.victim"];
|
local r = result["http.sqli.victim"];
|
||||||
NOTICE([$note=SQL_Injection_Victim,
|
NOTICE([$note=SQL_Injection_Victim,
|
||||||
|
@ -106,7 +106,7 @@ event http_request(c: connection, method: string, original_URI: string,
|
||||||
if ( match_sql_injection_uri in unescaped_URI )
|
if ( match_sql_injection_uri in unescaped_URI )
|
||||||
{
|
{
|
||||||
add c$http$tags[URI_SQLI];
|
add c$http$tags[URI_SQLI];
|
||||||
|
|
||||||
SumStats::observe("http.sqli.attacker", [$host=c$id$orig_h], [$str=original_URI]);
|
SumStats::observe("http.sqli.attacker", [$host=c$id$orig_h], [$str=original_URI]);
|
||||||
SumStats::observe("http.sqli.victim", [$host=c$id$resp_h], [$str=original_URI]);
|
SumStats::observe("http.sqli.victim", [$host=c$id$resp_h], [$str=original_URI]);
|
||||||
}
|
}
|
||||||
|
|
|
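The threshold and sample count above are likewise &redef-able; a hedged tuning sketch (values illustrative):

    # Sketch only: raise the bar on noisy networks and keep more samples.
    redef HTTP::sqli_requests_threshold = 200;
    redef HTTP::collect_SQLi_samples = 10;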
@@ -10,7 +10,7 @@ module SSH;

export {
    redef enum Notice::Type += {
        ## Indicates that a host has been identified as crossing the
        ## :bro:id:`SSH::password_guesses_limit` threshold with heuristically
        ## determined failed logins.
        Password_Guessing,

@@ -24,7 +24,7 @@ export {
        ## An indicator of the login for the intel framework.
        SSH::SUCCESSFUL_LOGIN,
    };

    ## The number of failed SSH connections before a host is designated as
    ## guessing passwords.
    const password_guesses_limit = 30 &redef;

@@ -33,9 +33,9 @@ export {
    ## model of a password guesser.
    const guessing_timeout = 30 mins &redef;

    ## This value can be used to exclude hosts or entire networks from being
    ## tracked as potential "guessers". There are cases where the success
    ## heuristic fails and this acts as the whitelist. The index represents
    ## client subnets and the yield value represents server subnets.
    const ignore_guessers: table[subnet] of subnet &redef;
    }
@@ -46,21 +46,21 @@ event bro_init()
    SumStats::create([$epoch=guessing_timeout,
                      $reducers=set(r1),
                      $threshold_val(key: SumStats::Key, result: SumStats::Result) =
                          {
                          return double_to_count(result["ssh.login.failure"]$sum);
                          },
                      $threshold=password_guesses_limit,
                      $threshold_crossed(key: SumStats::Key, result: SumStats::Result) =
                          {
                          local r = result["ssh.login.failure"];
                          # Generate the notice.
                          NOTICE([$note=Password_Guessing,
                                  $msg=fmt("%s appears to be guessing SSH passwords (seen in %d connections).", key$host, r$num),
                                  $src=key$host,
                                  $identifier=cat(key$host)]);
                          # Insert the guesser into the intel framework.
                          Intel::insert([$host=key$host,
                                         $meta=[$source="local",
                                                $desc=fmt("Bro observed %d apparently failed SSH connections.", r$num)]]);
                          }]);
    }
@@ -68,7 +68,7 @@ event bro_init()
event SSH::heuristic_successful_login(c: connection)
    {
    local id = c$id;

    Intel::seen([$host=id$orig_h,
                 $conn=c,
                 $where=SSH::SUCCESSFUL_LOGIN]);
@@ -77,8 +77,8 @@ event SSH::heuristic_successful_login(c: connection)
event SSH::heuristic_failed_login(c: connection)
    {
    local id = c$id;

    # Add data to the FAILED_LOGIN metric unless this connection should
    # be ignored.
    if ( ! (id$orig_h in ignore_guessers &&
            id$resp_h in ignore_guessers[id$orig_h]) )
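ignore_guessers maps client subnets to the server subnets for which their failures are ignored; a hedged sketch of a whitelist entry (the subnets are illustrative):

    # Sketch only: ignore failures from 10.0.0.0/8 clients against
    # servers in 192.168.0.0/16.
    redef SSH::ignore_guessers += {
        [10.0.0.0/8] = 192.168.0.0/16,
    };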
@@ -239,6 +239,11 @@ TableType* record_field_table;

StringVal* cmd_line_bpf_filter;

+OpaqueType* md5_type;
+OpaqueType* sha1_type;
+OpaqueType* sha256_type;
+OpaqueType* entropy_type;
+
#include "const.bif.netvar_def"
#include "types.bif.netvar_def"
#include "event.bif.netvar_def"
@@ -298,6 +303,11 @@ void init_general_global_var()

    cmd_line_bpf_filter =
        internal_val("cmd_line_bpf_filter")->AsStringVal();
+
+    md5_type = new OpaqueType("md5");
+    sha1_type = new OpaqueType("sha1");
+    sha256_type = new OpaqueType("sha256");
+    entropy_type = new OpaqueType("entropy");
    }

void init_net_var()
@@ -346,7 +356,7 @@ void init_net_var()
        opt_internal_int("tcp_excessive_data_without_further_acks");

    x509_type = internal_type("X509")->AsRecordType();

    socks_address = internal_type("SOCKS::Address")->AsRecordType();

    non_analyzed_lifetime = opt_internal_double("non_analyzed_lifetime");
@@ -243,6 +243,12 @@ extern TableType* record_field_table;

extern StringVal* cmd_line_bpf_filter;

+class OpaqueType;
+extern OpaqueType* md5_type;
+extern OpaqueType* sha1_type;
+extern OpaqueType* sha256_type;
+extern OpaqueType* entropy_type;
+
// Initializes globals that don't pertain to network/event analysis.
extern void init_general_global_var();
@@ -1,4 +1,5 @@
#include "OpaqueVal.h"
+#include "NetVar.h"
#include "Reporter.h"
#include "Serializer.h"
#include "HyperLogLog.h"
@@ -144,6 +145,10 @@ bool HashVal::DoUnserialize(UnserialInfo* info)
    return UNSERIALIZE(&valid);
    }

+MD5Val::MD5Val() : HashVal(md5_type)
+    {
+    }
+
void MD5Val::digest(val_list& vlist, u_char result[MD5_DIGEST_LENGTH])
    {
    MD5_CTX h;
@@ -261,6 +266,10 @@ bool MD5Val::DoUnserialize(UnserialInfo* info)
    return true;
    }

+SHA1Val::SHA1Val() : HashVal(sha1_type)
+    {
+    }
+
void SHA1Val::digest(val_list& vlist, u_char result[SHA_DIGEST_LENGTH])
    {
    SHA_CTX h;
@@ -369,6 +378,10 @@ bool SHA1Val::DoUnserialize(UnserialInfo* info)
    return true;
    }

+SHA256Val::SHA256Val() : HashVal(sha256_type)
+    {
+    }
+
void SHA256Val::digest(val_list& vlist, u_char result[SHA256_DIGEST_LENGTH])
    {
    SHA256_CTX h;
@@ -482,6 +495,9 @@ bool SHA256Val::DoUnserialize(UnserialInfo* info)
    return true;
    }

+EntropyVal::EntropyVal() : OpaqueVal(entropy_type)
+    {
+    }

bool EntropyVal::Feed(const void* data, size_t size)
    {
@@ -54,7 +54,7 @@ public:
                       u_char key[MD5_DIGEST_LENGTH],
                       u_char result[MD5_DIGEST_LENGTH]);

-    MD5Val() : HashVal(new OpaqueType("md5")) { }
+    MD5Val();

protected:
    friend class Val;

@@ -73,7 +73,7 @@ class SHA1Val : public HashVal {
public:
    static void digest(val_list& vlist, u_char result[SHA_DIGEST_LENGTH]);

-    SHA1Val() : HashVal(new OpaqueType("sha1")) { }
+    SHA1Val();

protected:
    friend class Val;

@@ -92,7 +92,7 @@ class SHA256Val : public HashVal {
public:
    static void digest(val_list& vlist, u_char result[SHA256_DIGEST_LENGTH]);

-    SHA256Val() : HashVal(new OpaqueType("sha256")) { }
+    SHA256Val();

protected:
    friend class Val;

@@ -109,7 +109,7 @@ private:

class EntropyVal : public OpaqueVal {
public:
-    EntropyVal() : OpaqueVal(new OpaqueType("entropy")) { }
+    EntropyVal();

    bool Feed(const void* data, size_t size);
    bool Get(double *r_ent, double *r_chisq, double *r_mean,
src/TCP.cc
@@ -566,7 +566,7 @@ void TCP_Analyzer::UpdateInactiveState(double t,
    else
        endpoint->SetState(TCP_ENDPOINT_SYN_SENT);

-    if ( connection_attempt )
+    if ( tcp_attempt_delay )
        ADD_ANALYZER_TIMER(&TCP_Analyzer::AttemptTimer,
                           t + tcp_attempt_delay, 1,
                           TIMER_TCP_ATTEMPT);
@@ -1497,24 +1497,7 @@ void TCP_Analyzer::ExpireTimer(double t)

    if ( resp->state == TCP_ENDPOINT_INACTIVE )
        {
-        if ( (orig->state == TCP_ENDPOINT_SYN_SENT ||
-              orig->state == TCP_ENDPOINT_SYN_ACK_SENT) )
-            {
-            if ( ! connection_attempt )
-                {
-                // Time out the connection attempt,
-                // since the AttemptTimer isn't going
-                // to do it for us, and we don't want
-                // to clog the data structures with
-                // old, failed attempts.
-                Event(connection_timeout);
-                is_active = 0;
-                sessions->Remove(Conn());
-                return;
-                }
-            }
-
-        else if ( orig->state == TCP_ENDPOINT_INACTIVE )
+        if ( orig->state == TCP_ENDPOINT_INACTIVE )
            {
            // Nothing ever happened on this connection.
            // This can occur when we see a trashed
@@ -1749,7 +1749,7 @@ Val* TableVal::Default(Val* index)

    if ( def_val->Type()->Tag() != TYPE_FUNC ||
         same_type(def_val->Type(), Type()->YieldType()) )
-        return def_val->Ref();
+        return def_attr->AttrExpr()->IsConst() ? def_val->Ref() : def_val->Clone();

    const Func* f = def_val->AsFunc();
    val_list* vl = new val_list();
testing/btest/Baseline/language.table-default-record/out (new file)
@@ -0,0 +1,7 @@
+0
+0
+0
+0
+{
+
+}
@@ -1 +1,3 @@
-A test metric threshold was crossed with a value of: 100.0
+A test metric threshold was crossed with a value of: 101.0
+End of epoch handler was called
+101.0
@@ -3,8 +3,8 @@
#empty_field (empty)
#unset_field -
#path socks
-#open 2012-06-20-17-23-38
+#open 2013-05-02-01-02-50
#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p version user status request.host request.name request_p bound.host bound.name bound_p
#types time string addr port addr port count string string addr string port addr string port
-1340213015.276495 UWkUyAuUGXf 10.0.0.55 53994 60.190.189.214 8124 5 - succeeded - www.osnews.com 80 192.168.0.31 - 2688
+1340213015.276495 arKYeMETxOg 10.0.0.55 53994 60.190.189.214 8124 5 - succeeded - www.osnews.com 80 192.168.0.31 - 2688
-#close 2012-06-20-17-28-10
+#close 2013-05-02-01-02-50
@@ -3,9 +3,9 @@
#empty_field (empty)
#unset_field -
#path notice
-#open 2013-04-25-18-55-26
+#open 2013-04-28-22-36-26
-#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto note msg sub src dst p n peer_descr actions suppress_for dropped remote_location.country_code remote_location.region remote_location.city remote_location.latitude remote_location.longitude metric_index.host metric_index.str metric_index.network
+#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto note msg sub src dst p n peer_descr actions suppress_for dropped remote_location.country_code remote_location.region remote_location.city remote_location.latitude remote_location.longitude
-#types time string addr port addr port enum enum string string addr addr port count string table[enum] interval bool string string string double double addr string subnet
+#types time string addr port addr port enum enum string string addr addr port count string table[enum] interval bool string string string double double
-1366916126.685057 - - - - - - Software::Vulnerable_Version 1.2.3.4 is running Java 1.7.0.15 which is vulnerable. Java 1.7.0.15 1.2.3.4 - - - bro Notice::ACTION_LOG 3600.000000 F - - - - - - - -
+1367188586.649122 - - - - - - Software::Vulnerable_Version 1.2.3.4 is running Java 1.7.0.15 which is vulnerable. Java 1.7.0.15 1.2.3.4 - - - bro Notice::ACTION_LOG 3600.000000 F - - - - -
-1366916126.685057 - - - - - - Software::Vulnerable_Version 1.2.3.5 is running Java 1.6.0.43 which is vulnerable. Java 1.6.0.43 1.2.3.5 - - - bro Notice::ACTION_LOG 3600.000000 F - - - - - - - -
+1367188586.649122 - - - - - - Software::Vulnerable_Version 1.2.3.5 is running Java 1.6.0.43 which is vulnerable. Java 1.6.0.43 1.2.3.5 - - - bro Notice::ACTION_LOG 3600.000000 F - - - - -
-#close 2013-04-25-18-55-26
+#close 2013-04-28-22-36-26
@@ -6,33 +6,38 @@
# @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks
#
# @TEST-EXEC: btest-bg-run manager-1 HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local BROPATH=$BROPATH:.. CLUSTER_NODE=manager-1 bro -m %INPUT
-# @TEST-EXEC: btest-bg-run proxy-1 HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local BROPATH=$BROPATH:.. CLUSTER_NODE=proxy-1 bro -m %INPUT
# @TEST-EXEC: sleep 1
-# @TEST-EXEC: btest-bg-run worker-1 HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local BROPATH=$BROPATH:.. CLUSTER_NODE=worker-1 bro -m -r $TRACES/web.trace --pseudo-realtime %INPUT
-# @TEST-EXEC: btest-bg-run worker-2 HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local BROPATH=$BROPATH:.. CLUSTER_NODE=worker-2 bro -m -r $TRACES/web.trace --pseudo-realtime %INPUT
-# @TEST-EXEC: btest-bg-wait 60
-# @TEST-EXEC: btest-diff manager-1/metrics.log
+# @TEST-EXEC: btest-bg-run worker-1 HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local BROPATH=$BROPATH:.. CLUSTER_NODE=worker-1 bro -m %INPUT
+# @TEST-EXEC: btest-bg-run worker-2 HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local BROPATH=$BROPATH:.. CLUSTER_NODE=worker-2 bro -m %INPUT
+# @TEST-EXEC: btest-bg-wait 15

@TEST-START-FILE cluster-layout.bro
redef Cluster::nodes = {
    ["manager-1"] = [$node_type=Cluster::MANAGER, $ip=127.0.0.1, $p=37757/tcp, $workers=set("worker-1", "worker-2")],
-    ["proxy-1"] = [$node_type=Cluster::PROXY, $ip=127.0.0.1, $p=37758/tcp, $manager="manager-1", $workers=set("worker-1", "worker-2")],
-    ["worker-1"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=37760/tcp, $manager="manager-1", $proxy="proxy-1", $interface="eth0"],
-    ["worker-2"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=37761/tcp, $manager="manager-1", $proxy="proxy-1", $interface="eth1"],
+    ["worker-1"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=37760/tcp, $manager="manager-1", $interface="eth0"],
+    ["worker-2"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=37761/tcp, $manager="manager-1", $interface="eth1"],
};
@TEST-END-FILE

redef Log::default_rotation_interval = 0secs;

-redef enum Metrics::ID += {
-    TEST_METRIC,
-};
+global n = 0;

event bro_init() &priority=5
    {
-    Metrics::add_filter(TEST_METRIC,
-                        [$name="foo-bar",
-                         $break_interval=3secs]);
+    local r1: SumStats::Reducer = [$stream="test", $apply=set(SumStats::SUM, SumStats::MIN, SumStats::MAX, SumStats::AVERAGE, SumStats::STD_DEV, SumStats::VARIANCE, SumStats::UNIQUE)];
+    SumStats::create([$epoch=5secs,
+                      $reducers=set(r1),
+                      $epoch_finished(rt: SumStats::ResultTable) =
+                          {
+                          for ( key in rt )
+                              {
+                              local r = rt[key]["test"];
+                              print fmt("Host: %s - num:%d - sum:%.1f - avg:%.1f - max:%.1f - min:%.1f - var:%.1f - std_dev:%.1f - unique:%d", key$host, r$num, r$sum, r$average, r$max, r$min, r$variance, r$std_dev, r$unique);
+                              }
+
+                          terminate();
+                          }]);
    }

event remote_connection_closed(p: event_peer)
@@ -41,43 +46,40 @@ event remote_connection_closed(p: event_peer)
    }

global ready_for_data: event();
-redef Cluster::manager2worker_events += /ready_for_data/;
+redef Cluster::manager2worker_events += /^ready_for_data$/;

-@if ( Cluster::local_node_type() == Cluster::WORKER )

event ready_for_data()
    {
-    Metrics::add_data(TEST_METRIC, [$host=1.2.3.4], 3);
-    Metrics::add_data(TEST_METRIC, [$host=6.5.4.3], 2);
-    Metrics::add_data(TEST_METRIC, [$host=7.2.1.5], 1);
+    if ( Cluster::node == "worker-1" )
+        {
+        SumStats::observe("test", [$host=1.2.3.4], [$num=34]);
+        SumStats::observe("test", [$host=1.2.3.4], [$num=30]);
+        SumStats::observe("test", [$host=6.5.4.3], [$num=1]);
+        SumStats::observe("test", [$host=7.2.1.5], [$num=54]);
+        }
+    if ( Cluster::node == "worker-2" )
+        {
+        SumStats::observe("test", [$host=1.2.3.4], [$num=75]);
+        SumStats::observe("test", [$host=1.2.3.4], [$num=30]);
+        SumStats::observe("test", [$host=1.2.3.4], [$num=3]);
+        SumStats::observe("test", [$host=1.2.3.4], [$num=57]);
+        SumStats::observe("test", [$host=1.2.3.4], [$num=52]);
+        SumStats::observe("test", [$host=1.2.3.4], [$num=61]);
+        SumStats::observe("test", [$host=1.2.3.4], [$num=95]);
+        SumStats::observe("test", [$host=6.5.4.3], [$num=5]);
+        SumStats::observe("test", [$host=7.2.1.5], [$num=91]);
+        SumStats::observe("test", [$host=10.10.10.10], [$num=5]);
+        }
    }

-@endif

@if ( Cluster::local_node_type() == Cluster::MANAGER )

-global n = 0;
global peer_count = 0;
-
-event Metrics::log_metrics(rec: Metrics::Info)
+event remote_connection_handshake_done(p: event_peer) &priority=-5
    {
-    n = n + 1;
-    if ( n == 3 )
-        {
-        terminate_communication();
-        terminate();
-        }
-    }
-
-event remote_connection_handshake_done(p: event_peer)
-    {
-    print p;
-    peer_count = peer_count + 1;
-    if ( peer_count == 3 )
-        {
+    ++peer_count;
+    if ( peer_count == 2 )
        event ready_for_data();
-        }
    }

@endif
@@ -3,12 +3,13 @@
# scripts that block after loading, e.g. start listening on a socket.
#
# Commonly, this test may fail if one forgets to @load some base/ scripts
-# when writing a new bro script.
+# when writing a new bro script. Look into "allerrors" to find out
+# which script had trouble.
#
# @TEST-SERIALIZE: comm
#
# @TEST-EXEC: test -d $DIST/scripts
-# @TEST-EXEC: for script in `find $DIST/scripts/ -name \*\.bro -not -path '*/site/*'`; do echo $script; if echo "$script" | egrep -q 'communication/listen|controllee'; then rm -rf load_attempt .bgprocs; btest-bg-run load_attempt bro -b $script; btest-bg-wait -k 2; cat load_attempt/.stderr >>allerrors; else bro -b $script 2>>allerrors; fi done || exit 0
+# @TEST-EXEC: for script in `find $DIST/scripts/ -name \*\.bro -not -path '*/site/*'`; do echo "=== $script" >>allerrors; if echo "$script" | egrep -q 'communication/listen|controllee'; then rm -rf load_attempt .bgprocs; btest-bg-run load_attempt bro -b $script; btest-bg-wait -k 2; cat load_attempt/.stderr >>allerrors; else bro -b $script 2>>allerrors; fi done || exit 0
-# @TEST-EXEC: cat allerrors | grep -v "received termination signal" | sort | uniq > unique_errors
+# @TEST-EXEC: cat allerrors | grep -v "received termination signal" | grep -v '===' | sort | uniq > unique_errors
# @TEST-EXEC: if [ $(grep -c LibCURL_INCLUDE_DIR-NOTFOUND $BUILD/CMakeCache.txt) -ne 0 ]; then cp unique_errors unique_errors_no_elasticsearch; fi
# @TEST-EXEC: if [ $(grep -c LibCURL_INCLUDE_DIR-NOTFOUND $BUILD/CMakeCache.txt) -ne 0 ]; then btest-diff unique_errors_no_elasticsearch; else btest-diff unique_errors; fi
testing/btest/language/table-default-record.bro (new file)
@@ -0,0 +1,24 @@
+# @TEST-EXEC: bro -b %INPUT >out
+# @TEST-EXEC: btest-diff out
+
+type Foo: record {
+    x: count &default=0;
+};
+
+global foo: table[count] of Foo = {} &default=[];
+
+# returns the &default value as usual
+print(foo[0]$x);
+print(foo[1]$x);
+
+# these are essentially no-ops since a copy of the &default value is returned
+# by the lookup
+foo[0]$x = 0;
+foo[1]$x = 1;
+
+# the &default value isn't modified
+print(foo[0]$x);
+print(foo[1]$x);
+
+# table membership isn't modified
+print(foo);
@@ -4,7 +4,7 @@
# @TEST-EXEC: sleep 3
# @TEST-EXEC: btest-bg-run worker-1 BROPATH=$BROPATH:.. CLUSTER_NODE=worker-1 bro %INPUT
# @TEST-EXEC: btest-bg-run worker-2 BROPATH=$BROPATH:.. CLUSTER_NODE=worker-2 bro %INPUT
-# @TEST-EXEC: btest-bg-wait 10
+# @TEST-EXEC: btest-bg-wait 20
# @TEST-EXEC: btest-diff manager-1/.stdout

@TEST-START-FILE cluster-layout.bro

@@ -20,8 +20,15 @@ redef Log::default_rotation_interval = 0secs;
event bro_init() &priority=5
    {
    local r1: SumStats::Reducer = [$stream="test.metric", $apply=set(SumStats::SUM)];
-    SumStats::create([$epoch=1hr,
+    SumStats::create([$epoch=10secs,
                      $reducers=set(r1),
+                      $epoch_finished(data: SumStats::ResultTable) =
+                          {
+                          print "End of epoch handler was called";
+                          for ( res in data )
+                              print data[res]["test.metric"]$sum;
+                          terminate();
+                          },
                      $threshold_val(key: SumStats::Key, result: SumStats::Result) =
                          {
                          return double_to_count(result["test.metric"]$sum);

@@ -30,7 +37,6 @@ event bro_init() &priority=5
                      $threshold_crossed(key: SumStats::Key, result: SumStats::Result) =
                          {
                          print fmt("A test metric threshold was crossed with a value of: %.1f", result["test.metric"]$sum);
-                          terminate();
                          }]);
    }

@@ -52,8 +58,13 @@ event remote_connection_handshake_done(p: event_peer)
    if ( p$descr == "manager-1" )
        {
        if ( Cluster::node == "worker-1" )
+            {
            schedule 0.1sec { do_stats(1) };
+            schedule 5secs { do_stats(60) };
+            }
        if ( Cluster::node == "worker-2" )
-            schedule 0.5sec { do_stats(99) };
+            schedule 0.5sec { do_stats(40) };
        }
    }