Merge branch 'master' into topic/robin/dnp3-merge-v3

Conflicts:
	scripts/base/init-default.bro
This commit is contained in:
Robin Sommer 2013-08-09 17:11:51 -07:00
commit 0e7f51f78c
90 changed files with 1841 additions and 923 deletions

66
CHANGES
View file

@ -1,4 +1,70 @@
2.1-1041 | 2013-08-09 15:32:22 -0700
* Update coverage baselines for canonical load order of scripts.
(Jon Siwek)
2.1-1039 | 2013-08-09 15:30:15 -0700
* Fix mem leak in DHCP analyzer. (Jon Siwek)
* Fix a unit test outdated by recent sumstats changes. (Jon Siwek)
2.1-1036 | 2013-08-05 17:29:11 -0400
* Fix the SSL infinite loop I just created. (Seth Hall)
2.1-1035 | 2013-08-05 16:44:50 -0400
* Change to SSL log delay to cause the log to write even if delay times out. (Seth Hall)
2.1-1034 | 2013-08-03 20:27:43 -0700
* A set of DHCP extensions. (Vlad Grigorescu)
- Leases are logged to dhcp.log as they are seen.
- scripts/policy/protocols/dhcp/known-devices-and-hostnames.bro
- Added DPD sig.
2.1-1027 | 2013-08-03 01:57:37 -0400
* Fix a major memory issue in the SumStats framework.
2.1-1026 | 2013-08-02 22:35:09 -0400
* Fix the SumStats top-k plugin and test. (Seth Hall)
* Rework of SumStats API to reduce high instantaneous memory
use on clusters. (Seth Hall)
* Large update for the SumStats framework.
- On-demand access to sumstats results through "return from"
functions named SumStats::request and SumStats::request_key.
Both functions are tested in standalone and clustered modes.
- $name field has returned to SumStats which simplifies cluster
code and makes the on-demand access stuff possible.
- Clustered results can only be collected for 1 minute from their
time of creation now instead of time of last read.
- Thresholds use doubles instead of counts everywhere now.
- Calculation dependency resolution occurs at startup time now
instead of doing it at observation time, which provides a minor
cpu performance improvement. A new plugin registration mechanism
was created to support this change.
- AppStats now has a minimal doc string and is broken into hook-based
plugins.
- AppStats and traceroute detection added to local.bro (Seth Hall)
2.1-1009 | 2013-08-02 17:19:08 -0700
* A number of exec module and raw input reader fixes. (Jon Siwek)
2.1-1007 | 2013-08-01 15:41:54 -0700
* More function documentation. (Bernhard Amann)

View file

@ -1 +1 @@
2.1-1007
2.1-1041

@ -1 +1 @@
Subproject commit 314fa8f65fc240e960c23c3bba98623436a72b98
Subproject commit 00674ed07d702252b00675b5060647d9e811cdd7

@ -1 +1 @@
Subproject commit d9963983c0b4d426b24836f8d154d014d5aecbba
Subproject commit 0e2d74e488195170e4648037e22b51e122dc7b0e

@ -1 +1 @@
Subproject commit d59c73b6e0966ad63bbc63a35741b5f68263e7b1
Subproject commit 7ddfa3212d1fd0822588d4a96158f1a30c755afe

@ -1 +1 @@
Subproject commit 52fd91261f41fa1528f7b964837a364d7991889e
Subproject commit c9293bad3bf4d6fc3e1808a315e791140a632961

2
cmake

@ -1 +1 @@
Subproject commit 026639f8368e56742c0cb5d9fb390ea64e60ec50
Subproject commit 370f7efc6e144f978e2309ad80b1a10c83feaaa9

View file

@ -139,6 +139,9 @@ rest_target(${psd} base/protocols/conn/contents.bro)
rest_target(${psd} base/protocols/conn/inactivity.bro)
rest_target(${psd} base/protocols/conn/main.bro)
rest_target(${psd} base/protocols/conn/polling.bro)
rest_target(${psd} base/protocols/dhcp/consts.bro)
rest_target(${psd} base/protocols/dhcp/main.bro)
rest_target(${psd} base/protocols/dhcp/utils.bro)
rest_target(${psd} base/protocols/dns/consts.bro)
rest_target(${psd} base/protocols/dns/main.bro)
rest_target(${psd} base/protocols/ftp/files.bro)
@ -206,9 +209,16 @@ rest_target(${psd} policy/frameworks/software/vulnerable.bro)
rest_target(${psd} policy/integration/barnyard2/main.bro)
rest_target(${psd} policy/integration/barnyard2/types.bro)
rest_target(${psd} policy/integration/collective-intel/main.bro)
rest_target(${psd} policy/misc/app-metrics.bro)
rest_target(${psd} policy/misc/app-stats/main.bro)
rest_target(${psd} policy/misc/app-stats/plugins/facebook.bro)
rest_target(${psd} policy/misc/app-stats/plugins/gmail.bro)
rest_target(${psd} policy/misc/app-stats/plugins/google.bro)
rest_target(${psd} policy/misc/app-stats/plugins/netflix.bro)
rest_target(${psd} policy/misc/app-stats/plugins/pandora.bro)
rest_target(${psd} policy/misc/app-stats/plugins/youtube.bro)
rest_target(${psd} policy/misc/capture-loss.bro)
rest_target(${psd} policy/misc/detect-traceroute/main.bro)
rest_target(${psd} policy/misc/known-devices.bro)
rest_target(${psd} policy/misc/load-balancing.bro)
rest_target(${psd} policy/misc/loaded-scripts.bro)
rest_target(${psd} policy/misc/profiling.bro)
@ -218,6 +228,7 @@ rest_target(${psd} policy/misc/trim-trace-file.bro)
rest_target(${psd} policy/protocols/conn/known-hosts.bro)
rest_target(${psd} policy/protocols/conn/known-services.bro)
rest_target(${psd} policy/protocols/conn/weirds.bro)
rest_target(${psd} policy/protocols/dhcp/known-devices-and-hostnames.bro)
rest_target(${psd} policy/protocols/dns/auth-addl.bro)
rest_target(${psd} policy/protocols/dns/detect-external-names.bro)
rest_target(${psd} policy/protocols/ftp/detect-bruteforcing.bro)

View file

@ -10,10 +10,6 @@
module SumStats;
export {
## Allows a user to decide how large of result groups the workers should transmit
## values for cluster stats aggregation.
const cluster_send_in_groups_of = 50 &redef;
## The percent of the full threshold value that needs to be met on a single worker
## for that worker to send the value to its manager in order for it to request a
## global view for that value. There is no requirement that the manager requests
@ -27,45 +23,46 @@ export {
## performed. In practice this should hopefully have a minimal effect.
const max_outstanding_global_views = 10 &redef;
## Intermediate updates can cause overload situations on very large clusters. This
## option may help reduce load and correct intermittent problems. The goal for this
## option is also meant to be temporary.
const enable_intermediate_updates = T &redef;
## Event sent by the manager in a cluster to initiate the collection of values for
## a sumstat.
global cluster_ss_request: event(uid: string, ssid: string);
global cluster_ss_request: event(uid: string, ss_name: string, cleanup: bool);
## Event sent by nodes that are collecting sumstats after receiving a request for
## the sumstat from the manager.
global cluster_ss_response: event(uid: string, ssid: string, data: ResultTable, done: bool);
#global cluster_ss_response: event(uid: string, ss_name: string, data: ResultTable, done: bool, cleanup: bool);
## This event is sent by the manager in a cluster to initiate the collection of
## a single key value from a sumstat. It's typically used to get intermediate
## updates before the break interval triggers to speed detection of a value
## crossing a threshold.
global cluster_key_request: event(uid: string, ssid: string, key: Key);
global cluster_get_result: event(uid: string, ss_name: string, key: Key, cleanup: bool);
## This event is sent by nodes in response to a
## :bro:id:`SumStats::cluster_key_request` event.
global cluster_key_response: event(uid: string, ssid: string, key: Key, result: Result);
## :bro:id:`SumStats::cluster_get_result` event.
global cluster_send_result: event(uid: string, ss_name: string, key: Key, result: Result, cleanup: bool);
## This is sent by workers to indicate that they crossed the percent
## of the current threshold by the percentage defined globally in
## :bro:id:`SumStats::cluster_request_global_view_percent`
global cluster_key_intermediate_response: event(ssid: string, key: SumStats::Key);
global cluster_key_intermediate_response: event(ss_name: string, key: SumStats::Key);
## This event is scheduled internally on workers to send result chunks.
global send_data: event(uid: string, ssid: string, data: ResultTable);
global send_data: event(uid: string, ss_name: string, data: ResultTable, cleanup: bool);
global get_a_key: event(uid: string, ss_name: string, cleanup: bool &default=F);
global send_a_key: event(uid: string, ss_name: string, key: Key);
global send_no_key: event(uid: string, ss_name: string);
## This event is generated when a threshold is crossed.
global cluster_threshold_crossed: event(ssid: string, key: SumStats::Key, thold: Thresholding);
global cluster_threshold_crossed: event(ss_name: string, key: SumStats::Key, thold_index: count);
}
# Add events to the cluster framework to make this work.
redef Cluster::manager2worker_events += /SumStats::cluster_(ss_request|key_request|threshold_crossed)/;
redef Cluster::manager2worker_events += /SumStats::thresholds_reset/;
redef Cluster::worker2manager_events += /SumStats::cluster_(ss_response|key_response|key_intermediate_response)/;
redef Cluster::manager2worker_events += /SumStats::cluster_(ss_request|get_result|threshold_crossed)/;
redef Cluster::manager2worker_events += /SumStats::(thresholds_reset|get_a_key)/;
redef Cluster::worker2manager_events += /SumStats::cluster_(ss_response|send_result|key_intermediate_response)/;
redef Cluster::worker2manager_events += /SumStats::(send_a_key|send_no_key)/;
@if ( Cluster::local_node_type() != Cluster::MANAGER )
# This variable is maintained to know what keys have recently sent as
@ -74,12 +71,9 @@ redef Cluster::worker2manager_events += /SumStats::cluster_(ss_response|key_resp
# an intermediate result has been received.
global recent_global_view_keys: table[string, Key] of count &create_expire=1min &default=0;
event bro_init() &priority=-100
{
# The manager is the only host allowed to track these.
stats_store = table();
reducer_store = table();
}
# Result tables indexed on a uid that are currently being sent to the
# manager.
global sending_results: table[string] of ResultTable = table() &create_expire=1min;
# This is done on all non-manager node types in the event that a sumstat is
# being collected somewhere other than a worker.
@ -87,95 +81,151 @@ function data_added(ss: SumStat, key: Key, result: Result)
{
# If an intermediate update for this value was sent recently, don't send
# it again.
if ( [ss$id, key] in recent_global_view_keys )
if ( [ss$name, key] in recent_global_view_keys )
return;
# If val is 5 and global view % is 0.1 (10%), pct_val will be 50. If that
# crosses the full threshold then it's a candidate to send as an
# intermediate update.
if ( enable_intermediate_updates &&
check_thresholds(ss, key, result, cluster_request_global_view_percent) )
if ( check_thresholds(ss, key, result, cluster_request_global_view_percent) )
{
# kick off intermediate update
event SumStats::cluster_key_intermediate_response(ss$id, key);
++recent_global_view_keys[ss$id, key];
event SumStats::cluster_key_intermediate_response(ss$name, key);
++recent_global_view_keys[ss$name, key];
}
}
event SumStats::send_data(uid: string, ssid: string, data: ResultTable)
#event SumStats::send_data(uid: string, ss_name: string, cleanup: bool)
# {
# #print fmt("WORKER %s: sending data for uid %s...", Cluster::node, uid);
#
# local local_data: ResultTable = table();
# local incoming_data: ResultTable = cleanup ? data : copy(data);
#
# local num_added = 0;
# for ( key in incoming_data )
# {
# local_data[key] = incoming_data[key];
# delete incoming_data[key];
#
# # Only send cluster_send_in_groups_of at a time. Queue another
# # event to send the next group.
# if ( cluster_send_in_groups_of == ++num_added )
# break;
# }
#
# local done = F;
# # If data is empty, this sumstat is done.
# if ( |incoming_data| == 0 )
# done = T;
#
# # Note: copy is needed to compensate serialization caching issue. This should be
# # changed to something else later.
# event SumStats::cluster_ss_response(uid, ss_name, copy(local_data), done, cleanup);
# if ( ! done )
# schedule 0.01 sec { SumStats::send_data(uid, T) };
# }
event SumStats::get_a_key(uid: string, ss_name: string, cleanup: bool)
{
#print fmt("WORKER %s: sending data for uid %s...", Cluster::node, uid);
local local_data: ResultTable = table();
local num_added = 0;
for ( key in data )
if ( uid in sending_results )
{
local_data[key] = data[key];
delete data[key];
# Only send cluster_send_in_groups_of at a time. Queue another
# event to send the next group.
if ( cluster_send_in_groups_of == ++num_added )
break;
if ( |sending_results[uid]| == 0 )
{
event SumStats::send_no_key(uid, ss_name);
}
else
{
for ( key in sending_results[uid] )
{
event SumStats::send_a_key(uid, ss_name, key);
# break to only send one.
break;
}
}
}
else if ( !cleanup && ss_name in result_store && |result_store[ss_name]| > 0 )
{
if ( |result_store[ss_name]| == 0 )
{
event SumStats::send_no_key(uid, ss_name);
}
else
{
for ( key in result_store[ss_name] )
{
event SumStats::send_a_key(uid, ss_name, key);
# break to only send one.
break;
}
}
}
else
{
event SumStats::send_no_key(uid, ss_name);
}
local done = F;
# If data is empty, this sumstat is done.
if ( |data| == 0 )
done = T;
# Note: copy is needed to compensate serialization caching issue. This should be
# changed to something else later.
event SumStats::cluster_ss_response(uid, ssid, copy(local_data), done);
if ( ! done )
schedule 0.01 sec { SumStats::send_data(uid, ssid, data) };
}
event SumStats::cluster_ss_request(uid: string, ssid: string)
event SumStats::cluster_ss_request(uid: string, ss_name: string, cleanup: bool)
{
#print fmt("WORKER %s: received the cluster_ss_request event for %s.", Cluster::node, id);
# Initiate sending all of the data for the requested stats.
if ( ssid in result_store )
event SumStats::send_data(uid, ssid, result_store[ssid]);
else
event SumStats::send_data(uid, ssid, table());
# Create a back store for the result
sending_results[uid] = (ss_name in result_store) ? result_store[ss_name] : table();
# Lookup the actual sumstats and reset it, the reference to the data
# currently stored will be maintained internally by the send_data event.
if ( ssid in stats_store )
reset(stats_store[ssid]);
# currently stored will be maintained internally from the
# sending_results table.
if ( cleanup && ss_name in stats_store )
reset(stats_store[ss_name]);
}
event SumStats::cluster_key_request(uid: string, ssid: string, key: Key)
event SumStats::cluster_get_result(uid: string, ss_name: string, key: Key, cleanup: bool)
{
if ( ssid in result_store && key in result_store[ssid] )
{
#print fmt("WORKER %s: received the cluster_key_request event for %s=%s.", Cluster::node, key2str(key), data);
#print fmt("WORKER %s: received the cluster_get_result event for %s=%s.", Cluster::node, key2str(key), data);
# Note: copy is needed to compensate serialization caching issue. This should be
# changed to something else later.
event SumStats::cluster_key_response(uid, ssid, key, copy(result_store[ssid][key]));
if ( cleanup ) # data will implicitly be in sending_results (i know this isn't great)
{
if ( uid in sending_results && key in sending_results[uid] )
{
# Note: copy is needed to compensate serialization caching issue. This should be
# changed to something else later.
event SumStats::cluster_send_result(uid, ss_name, key, copy(sending_results[uid][key]), cleanup);
delete sending_results[uid][key];
}
else
{
# We need to send an empty response if we don't have the data so that the manager
# can know that it heard back from all of the workers.
event SumStats::cluster_send_result(uid, ss_name, key, table(), cleanup);
}
}
else
{
# We need to send an empty response if we don't have the data so that the manager
# can know that it heard back from all of the workers.
event SumStats::cluster_key_response(uid, ssid, key, table());
if ( ss_name in result_store && key in result_store[ss_name] )
{
event SumStats::cluster_send_result(uid, ss_name, key, copy(result_store[ss_name][key]), cleanup);
}
else
{
# We need to send an empty response if we don't have the data so that the manager
# can know that it heard back from all of the workers.
event SumStats::cluster_send_result(uid, ss_name, key, table(), cleanup);
}
}
}
event SumStats::cluster_threshold_crossed(ssid: string, key: SumStats::Key, thold: Thresholding)
event SumStats::cluster_threshold_crossed(ss_name: string, key: SumStats::Key, thold_index: count)
{
if ( ssid !in threshold_tracker )
threshold_tracker[ssid] = table();
if ( ss_name !in threshold_tracker )
threshold_tracker[ss_name] = table();
threshold_tracker[ssid][key] = thold;
threshold_tracker[ss_name][key] = thold_index;
}
event SumStats::thresholds_reset(ssid: string)
event SumStats::thresholds_reset(ss_name: string)
{
threshold_tracker[ssid] = table();
delete threshold_tracker[ss_name];
}
@endif
@ -186,7 +236,7 @@ event SumStats::thresholds_reset(ssid: string)
# This variable is maintained by manager nodes as they collect and aggregate
# results.
# Index on a uid.
global stats_results: table[string] of ResultTable &read_expire=1min;
global stats_keys: table[string] of set[Key] &create_expire=1min;
# This variable is maintained by manager nodes to track how many "dones" they
# collected per collection unique id. Once the number of results for a uid
@ -194,18 +244,18 @@ global stats_results: table[string] of ResultTable &read_expire=1min;
# result is written out and deleted from here.
# Indexed on a uid.
# TODO: add an &expire_func in case not all results are received.
global done_with: table[string] of count &read_expire=1min &default=0;
global done_with: table[string] of count &create_expire=1min &default=0;
# This variable is maintained by managers to track intermediate responses as
# they are getting a global view for a certain key.
# Indexed on a uid.
global key_requests: table[string] of Result &read_expire=1min;
global key_requests: table[string] of Result &create_expire=1min;
# This variable is maintained by managers to prevent overwhelming communication due
# to too many intermediate updates. Each sumstat is tracked separately so that
# one won't overwhelm and degrade other quieter sumstats.
# Indexed on a sumstat id.
global outstanding_global_views: table[string] of count &default=0;
global outstanding_global_views: table[string] of count &create_expire=1min &default=0;
const zero_time = double_to_time(0.0);
# Managers handle logging.
@ -213,15 +263,19 @@ event SumStats::finish_epoch(ss: SumStat)
{
if ( network_time() > zero_time )
{
#print fmt("%.6f MANAGER: breaking %s sumstat for %s sumstat", network_time(), ss$name, ss$id);
#print fmt("%.6f MANAGER: breaking %s sumstat", network_time(), ss$name);
local uid = unique_id("");
if ( uid in stats_results )
delete stats_results[uid];
stats_results[uid] = table();
if ( uid in stats_keys )
delete stats_keys[uid];
stats_keys[uid] = set();
# Request data from peers.
event SumStats::cluster_ss_request(uid, ss$id);
event SumStats::cluster_ss_request(uid, ss$name, T);
done_with[uid] = 0;
#print fmt("get_key by uid: %s", uid);
event SumStats::get_a_key(uid, ss$name, T);
}
# Schedule the next finish_epoch event.
@ -235,51 +289,160 @@ function data_added(ss: SumStat, key: Key, result: Result)
if ( check_thresholds(ss, key, result, 1.0) )
{
threshold_crossed(ss, key, result);
event SumStats::cluster_threshold_crossed(ss$id, key, threshold_tracker[ss$id][key]);
event SumStats::cluster_threshold_crossed(ss$name, key, threshold_tracker[ss$name][key]);
}
}
event SumStats::cluster_key_response(uid: string, ssid: string, key: Key, result: Result)
function handle_end_of_result_collection(uid: string, ss_name: string, key: Key, cleanup: bool)
{
#print fmt("worker_count:%d :: done_with:%d", Cluster::worker_count, done_with[uid]);
local ss = stats_store[ss_name];
local ir = key_requests[uid];
if ( check_thresholds(ss, key, ir, 1.0) )
{
threshold_crossed(ss, key, ir);
event SumStats::cluster_threshold_crossed(ss_name, key, threshold_tracker[ss_name][key]);
}
if ( cleanup )
{
# This is done here because "cleanup" implicitly means
# it's the end of an epoch.
if ( ss?$epoch_result && |ir| > 0 )
{
local now = network_time();
ss$epoch_result(now, key, ir);
}
# Check that there is an outstanding view before subtracting.
# Global views only apply to non-dynamic requests. Dynamic
# requests must be serviced.
if ( outstanding_global_views[ss_name] > 0 )
--outstanding_global_views[ss_name];
}
delete key_requests[uid];
delete done_with[uid];
}
function request_all_current_keys(uid: string, ss_name: string, cleanup: bool)
{
#print "request_all_current_keys";
if ( uid in stats_keys && |stats_keys[uid]| > 0 )
{
#print fmt(" -- %d remaining keys here", |stats_keys[uid]|);
for ( key in stats_keys[uid] )
{
done_with[uid] = 0;
event SumStats::cluster_get_result(uid, ss_name, key, cleanup);
when ( uid in done_with && Cluster::worker_count == done_with[uid] )
{
#print "done getting result";
handle_end_of_result_collection(uid, ss_name, key, cleanup);
request_all_current_keys(uid, ss_name, cleanup);
}
delete stats_keys[uid][key];
break; # only a single key
}
}
else
{
# Get more keys! And this breaks us out of the evented loop.
done_with[uid] = 0;
#print fmt("get_key by uid: %s", uid);
event SumStats::get_a_key(uid, ss_name, cleanup);
}
}
event SumStats::send_no_key(uid: string, ss_name: string)
{
#print "send_no_key";
++done_with[uid];
if ( Cluster::worker_count == done_with[uid] )
{
delete done_with[uid];
if ( |stats_keys[uid]| > 0 )
{
#print "we need more keys!";
# Now that we have a key from each worker, lets
# grab all of the results.
request_all_current_keys(uid, ss_name, T);
}
else
{
#print "we're out of keys!";
local ss = stats_store[ss_name];
if ( ss?$epoch_finished )
ss$epoch_finished(network_time());
}
}
}
event SumStats::send_a_key(uid: string, ss_name: string, key: Key)
{
#print fmt("send_a_key %s", key);
if ( uid !in stats_keys )
{
# no clue what happened here
return;
}
if ( key !in stats_keys[uid] )
add stats_keys[uid][key];
++done_with[uid];
if ( Cluster::worker_count == done_with[uid] )
{
delete done_with[uid];
if ( |stats_keys[uid]| > 0 )
{
#print "we need more keys!";
# Now that we have a key from each worker, lets
# grab all of the results.
request_all_current_keys(uid, ss_name, T);
}
else
{
#print "we're out of keys!";
local ss = stats_store[ss_name];
if ( ss?$epoch_finished )
ss$epoch_finished(network_time());
}
}
}
event SumStats::cluster_send_result(uid: string, ss_name: string, key: Key, result: Result, cleanup: bool)
{
#print "cluster_send_result";
#print fmt("%0.6f MANAGER: receiving key data from %s - %s=%s", network_time(), get_event_peer()$descr, key2str(key), result);
# We only want to try and do a value merge if there are actually measured datapoints
# in the Result.
if ( uid in key_requests )
key_requests[uid] = compose_results(key_requests[uid], result);
else
if ( uid !in key_requests || |key_requests[uid]| == 0 )
key_requests[uid] = result;
else
key_requests[uid] = compose_results(key_requests[uid], result);
# Mark that a worker is done.
++done_with[uid];
#print fmt("worker_count:%d :: done_with:%d", Cluster::worker_count, done_with[uid]);
if ( Cluster::worker_count == done_with[uid] )
{
local ss = stats_store[ssid];
local ir = key_requests[uid];
if ( check_thresholds(ss, key, ir, 1.0) )
{
threshold_crossed(ss, key, ir);
event SumStats::cluster_threshold_crossed(ss$id, key, threshold_tracker[ss$id][key]);
}
delete done_with[uid];
delete key_requests[uid];
# Check that there is an outstanding view before subtracting.
if ( outstanding_global_views[ssid] > 0 )
--outstanding_global_views[ssid];
}
#if ( Cluster::worker_count == done_with[uid] )
# {
# print "done";
# handle_end_of_result_collection(uid, ss_name, key, cleanup);
# }
}
# Managers handle intermediate updates here.
event SumStats::cluster_key_intermediate_response(ssid: string, key: Key)
event SumStats::cluster_key_intermediate_response(ss_name: string, key: Key)
{
#print fmt("MANAGER: receiving intermediate key data from %s", get_event_peer()$descr);
#print fmt("MANAGER: requesting key data for %s", key2str(key));
if ( ssid in outstanding_global_views &&
|outstanding_global_views[ssid]| > max_outstanding_global_views )
if ( ss_name in outstanding_global_views &&
|outstanding_global_views[ss_name]| > max_outstanding_global_views )
{
# Don't do this intermediate update. Perhaps at some point in the future
# we will queue and randomly select from these ignored intermediate
@ -287,60 +450,131 @@ event SumStats::cluster_key_intermediate_response(ssid: string, key: Key)
return;
}
++outstanding_global_views[ssid];
++outstanding_global_views[ss_name];
local uid = unique_id("");
event SumStats::cluster_key_request(uid, ssid, key);
}
event SumStats::cluster_ss_response(uid: string, ssid: string, data: ResultTable, done: bool)
{
#print fmt("MANAGER: receiving results from %s", get_event_peer()$descr);
# Mark another worker as being "done" for this uid.
if ( done )
++done_with[uid];
local local_data = stats_results[uid];
local ss = stats_store[ssid];
for ( key in data )
done_with[uid] = 0;
event SumStats::cluster_get_result(uid, ss_name, key, F);
when ( uid in done_with && Cluster::worker_count == done_with[uid] )
{
if ( key in local_data )
local_data[key] = compose_results(local_data[key], data[key]);
else
local_data[key] = data[key];
# If a stat is done being collected, thresholds for each key
# need to be checked so we're doing it here to avoid doubly
# iterating over each key.
if ( Cluster::worker_count == done_with[uid] )
{
if ( check_thresholds(ss, key, local_data[key], 1.0) )
{
threshold_crossed(ss, key, local_data[key]);
event SumStats::cluster_threshold_crossed(ss$id, key, threshold_tracker[ss$id][key]);
}
}
handle_end_of_result_collection(uid, ss_name, key, F);
}
timeout 1.1min
{
Reporter::warning(fmt("Dynamic SumStat intermediate key request for %s (%s) took longer than 1 minute and was automatically cancelled.", ss_name, key));
}
# If the data has been collected from all peers, we are done and ready to finish.
if ( Cluster::worker_count == done_with[uid] )
{
if ( ss?$epoch_finished )
ss$epoch_finished(local_data);
}
#event SumStats::cluster_ss_response(uid: string, ss_name: string, data: ResultTable, done: bool, cleanup: bool)
# {
# #print fmt("MANAGER: receiving results from %s", get_event_peer()$descr);
#
# # Mark another worker as being "done" for this uid.
# if ( done )
# ++done_with[uid];
#
# # We had better only be getting requests for stuff that exists.
# if ( ss_name !in stats_store )
# return;
#
# if ( uid !in stats_keys )
# stats_keys[uid] = table();
#
# local local_data = stats_keys[uid];
# local ss = stats_store[ss_name];
#
# for ( key in data )
# {
# if ( key in local_data )
# local_data[key] = compose_results(local_data[key], data[key]);
# else
# local_data[key] = data[key];
#
# # If a stat is done being collected, thresholds for each key
# # need to be checked so we're doing it here to avoid doubly
# # iterating over each key.
# if ( Cluster::worker_count == done_with[uid] )
# {
# if ( check_thresholds(ss, key, local_data[key], 1.0) )
# {
# threshold_crossed(ss, key, local_data[key]);
# event SumStats::cluster_threshold_crossed(ss$name, key, threshold_tracker[ss$name][key]);
# }
# }
# }
#
# # If the data has been collected from all peers, we are done and ready to finish.
# if ( cleanup && Cluster::worker_count == done_with[uid] )
# {
# local now = network_time();
# if ( ss?$epoch_result )
# {
# for ( key in local_data )
# ss$epoch_result(now, key, local_data[key]);
# }
#
# if ( ss?$epoch_finished )
# ss$epoch_finished(now);
#
# # Clean up
# delete stats_keys[uid];
# delete done_with[uid];
# reset(ss);
# }
# }
#function request(ss_name: string): ResultTable
# {
# # This only needs to be implemented this way for cluster compatibility.
# local uid = unique_id("dyn-");
# stats_keys[uid] = table();
# done_with[uid] = 0;
# event SumStats::cluster_ss_request(uid, ss_name, F);
#
# return when ( uid in done_with && Cluster::worker_count == done_with[uid] )
# {
# if ( uid in stats_keys )
# {
# local ss_result = stats_keys[uid];
# # Clean up
# delete stats_keys[uid];
# delete done_with[uid];
# reset(stats_store[ss_name]);
# return ss_result;
# }
# else
# return table();
# }
# timeout 1.1min
# {
# Reporter::warning(fmt("Dynamic SumStat request for %s took longer than 1 minute and was automatically cancelled.", ss_name));
# return table();
# }
# }
function request_key(ss_name: string, key: Key): Result
{
local uid = unique_id("");
done_with[uid] = 0;
key_requests[uid] = table();
event SumStats::cluster_get_result(uid, ss_name, key, F);
return when ( uid in done_with && Cluster::worker_count == done_with[uid] )
{
#print "done with request_key";
local result = key_requests[uid];
# Clean up
delete stats_results[uid];
delete key_requests[uid];
delete done_with[uid];
# Not sure I need to reset the sumstat on the manager.
reset(ss);
return result;
}
timeout 1.1min
{
Reporter::warning(fmt("Dynamic SumStat key request for %s (%s) took longer than 1 minute and was automatically cancelled.", ss_name, key));
return table();
}
}
event remote_connection_handshake_done(p: event_peer) &priority=5
{
send_id(p, "SumStats::stats_store");
send_id(p, "SumStats::reducer_store");
}
@endif

View file

@ -74,10 +74,6 @@ export {
## Type to store results for multiple reducers.
type Result: table[string] of ResultVal;
## Type to store a table of sumstats results indexed
## by keys.
type ResultTable: table[Key] of Result;
## SumStats represent an aggregation of reducers along with
## mechanisms to handle various situations like the epoch ending
## or thresholds being crossed.
@ -87,8 +83,12 @@ export {
## is no assurance provided as to where the callbacks
## will be executed on clusters.
type SumStat: record {
## An arbitrary name for the sumstat so that it can
## be referred to later.
name: string;
## The interval at which this filter should be "broken"
## and the '$epoch_finished' callback called. The
## and the '$epoch_result' callback called. The
## results are also reset at this time so any threshold
## based detection needs to be set to a
## value that should be expected to happen within
@ -102,22 +102,28 @@ export {
## :bro:see:`SumStats::Result` structure which will be used
## for thresholding.
## This is required if a $threshold value is given.
threshold_val: function(key: SumStats::Key, result: SumStats::Result): count &optional;
threshold_val: function(key: SumStats::Key, result: SumStats::Result): double &optional;
## The threshold value for calling the
## $threshold_crossed callback.
threshold: count &optional;
threshold: double &optional;
## A series of thresholds for calling the
## $threshold_crossed callback.
threshold_series: vector of count &optional;
threshold_series: vector of double &optional;
## A callback that is called when a threshold is crossed.
threshold_crossed: function(key: SumStats::Key, result: SumStats::Result) &optional;
## A callback with the full collection of Results for
## this SumStat.
epoch_finished: function(rt: SumStats::ResultTable) &optional;
## A callback that receives each of the results at the
## end of the analysis epoch. The function will be
## called once for each key.
epoch_result: function(ts: time, key: SumStats::Key, result: SumStats::Result) &optional;
## A callback that will be called when a single collection
## interval is completed. The ts value will be the time of
## when the collection started.
epoch_finished: function(ts:time) &optional;
};
## Create a summary statistic.
@ -134,19 +140,23 @@ export {
## obs: The data point to send into the stream.
global observe: function(id: string, key: SumStats::Key, obs: SumStats::Observation);
## This record is primarily used for internal threshold tracking.
type Thresholding: record {
# Internal use only. Indicates if a simple threshold was already crossed.
is_threshold_crossed: bool &default=F;
# Internal use only. Current key for threshold series.
threshold_series_index: count &default=0;
};
## Dynamically request a sumstat key. This function should be
## used sparingly and not as a replacement for the callbacks
## from the :bro:see:`SumStat` record. The function is only
## available for use within "when" statements as an asynchronous
## function.
##
## ss_name: SumStat name.
##
## key: The SumStat key being requested.
##
## Returns: The result for the requested sumstat key.
global request_key: function(ss_name: string, key: Key): Result;
## This event is generated when thresholds are reset for a SumStat.
##
## ssid: SumStats ID that thresholds were reset for.
global thresholds_reset: event(ssid: string);
## name: SumStats name that thresholds were reset for.
global thresholds_reset: event(name: string);
## Helper function to represent a :bro:type:`SumStats::Key` value as
## a simple string.
@ -157,18 +167,49 @@ export {
global key2str: function(key: SumStats::Key): string;
}
# Type to store a table of sumstats results indexed by keys.
type ResultTable: table[Key] of Result;
# The function prototype for plugins to do calculations.
type ObserveFunc: function(r: Reducer, val: double, data: Observation, rv: ResultVal);
redef record Reducer += {
# Internal use only. Provides a reference back to the related SumStats by it's ID.
sid: string &optional;
# Internal use only. Provides a reference back to the related SumStats by its name.
ssname: string &optional;
calc_funcs: vector of Calculation &optional;
};
# Internal use only. For tracking thresholds per sumstat and key.
global threshold_tracker: table[string] of table[Key] of Thresholding &optional;
# In the case of a single threshold, 0 means the threshold isn't crossed.
# In the case of a threshold series, the number tracks the threshold offset.
global threshold_tracker: table[string] of table[Key] of count;
redef record SumStat += {
# Internal use only (mostly for cluster coherency).
id: string &optional;
};
function increment_threshold_tracker(ss_name: string, key: Key)
{
if ( ss_name !in threshold_tracker )
threshold_tracker[ss_name] = table();
if ( key !in threshold_tracker[ss_name] )
threshold_tracker[ss_name][key] = 0;
++threshold_tracker[ss_name][key];
}
function get_threshold_index(ss_name: string, key: Key): count
{
if ( ss_name !in threshold_tracker )
return 0;
if ( key !in threshold_tracker[ss_name] )
return 0;
return threshold_tracker[ss_name][key];
}
# Prototype the hook point for plugins to initialize any result values.
global init_resultval_hook: hook(r: Reducer, rv: ResultVal);
# Prototype the hook point for plugins to merge Results.
global compose_resultvals_hook: hook(result: ResultVal, rv1: ResultVal, rv2: ResultVal);
# Store of sumstats indexed on the sumstat id.
global stats_store: table[string] of SumStat = table();
@ -182,20 +223,20 @@ global result_store: table[string] of ResultTable = table();
# Store of threshold information.
global thresholds_store: table[string, Key] of bool = table();
# Store the calculations.
global calc_store: table[Calculation] of ObserveFunc = table();
# Store the dependencies for Calculations.
global calc_deps: table[Calculation] of vector of Calculation = table();
# Hook for registering observation calculation plugins.
global register_observe_plugins: hook();
# This is called whenever key values are updated and the new val is given as the
# `val` argument. It's only prototyped here because cluster and non-cluster have
# separate implementations.
global data_added: function(ss: SumStat, key: Key, result: Result);
# Prototype the hook point for plugins to do calculations.
global observe_hook: hook(r: Reducer, val: double, data: Observation, rv: ResultVal);
# Prototype the hook point for plugins to initialize any result values.
global init_resultval_hook: hook(r: Reducer, rv: ResultVal);
# Prototype the hook point for plugins to merge Results.
global compose_resultvals_hook: hook(result: ResultVal, rv1: ResultVal, rv2: ResultVal);
# Event that is used to "finish" measurements and adapt the measurement
# framework for clustered or non-clustered usage.
global finish_epoch: event(ss: SumStat);
@ -210,6 +251,24 @@ function key2str(key: Key): string
return fmt("sumstats_key(%s)", out);
}
function register_observe_plugin(calc: Calculation, func: ObserveFunc)
{
calc_store[calc] = func;
}
function add_observe_plugin_dependency(calc: Calculation, depends_on: Calculation)
{
if ( calc !in calc_deps )
calc_deps[calc] = vector();
calc_deps[calc][|calc_deps[calc]|] = depends_on;
}
event bro_init() &priority=100000
{
# Call all of the plugin registration hooks
hook register_observe_plugins();
}
function init_resultval(r: Reducer): ResultVal
{
local rv: ResultVal = [$begin=network_time(), $end=network_time()];
@ -234,25 +293,17 @@ function compose_results(r1: Result, r2: Result): Result
{
local result: Result = table();
if ( |r1| > |r2| )
for ( id in r1 )
{
for ( data_id in r1 )
{
if ( data_id in r2 )
result[data_id] = compose_resultvals(r1[data_id], r2[data_id]);
else
result[data_id] = r1[data_id];
}
result[id] = r1[id];
}
else
for ( id in r2 )
{
for ( data_id in r2 )
{
if ( data_id in r1 )
result[data_id] = compose_resultvals(r1[data_id], r2[data_id]);
else
result[data_id] = r2[data_id];
}
if ( id in r1 )
result[id] = compose_resultvals(r1[id], r2[id]);
else
result[id] = r2[id];
}
return result;
@ -261,18 +312,43 @@ function compose_results(r1: Result, r2: Result): Result
function reset(ss: SumStat)
{
if ( ss$id in result_store )
delete result_store[ss$id];
if ( ss$name in result_store )
delete result_store[ss$name];
result_store[ss$id] = table();
result_store[ss$name] = table();
if ( ss?$threshold || ss?$threshold_series )
if ( ss$name in threshold_tracker )
{
threshold_tracker[ss$id] = table();
event SumStats::thresholds_reset(ss$id);
delete threshold_tracker[ss$name];
threshold_tracker[ss$name] = table();
event SumStats::thresholds_reset(ss$name);
}
}
# This could potentially recurse forever, but plugin authors
# should be making sure they aren't causing reflexive dependencies.
function add_calc_deps(calcs: vector of Calculation, c: Calculation)
{
#print fmt("Checking for deps for %s", c);
for ( i in calc_deps[c] )
{
local skip_calc=F;
for ( j in calcs )
{
if ( calcs[j] == calc_deps[c][i] )
skip_calc=T;
}
if ( ! skip_calc )
{
if ( calc_deps[c][i] in calc_deps )
add_calc_deps(calcs, calc_deps[c][i]);
calcs[|c|] = calc_deps[c][i];
#print fmt("add dep for %s [%s] ", c, calc_deps[c][i]);
}
}
}
function create(ss: SumStat)
{
if ( (ss?$threshold || ss?$threshold_series) && ! ss?$threshold_val )
@ -280,14 +356,34 @@ function create(ss: SumStat)
Reporter::error("SumStats given a threshold with no $threshold_val function");
}
if ( ! ss?$id )
ss$id=unique_id("");
threshold_tracker[ss$id] = table();
stats_store[ss$id] = ss;
stats_store[ss$name] = ss;
if ( ss?$threshold || ss?$threshold_series )
threshold_tracker[ss$name] = table();
for ( reducer in ss$reducers )
{
reducer$sid = ss$id;
reducer$ssname = ss$name;
reducer$calc_funcs = vector();
for ( calc in reducer$apply )
{
# Add in dependencies recursively.
if ( calc in calc_deps )
add_calc_deps(reducer$calc_funcs, calc);
# Don't add this calculation to the vector if
# it was already added by something else as a
# dependency.
local skip_calc=F;
for ( j in reducer$calc_funcs )
{
if ( calc == reducer$calc_funcs[j] )
skip_calc=T;
}
if ( ! skip_calc )
reducer$calc_funcs[|reducer$calc_funcs|] = calc;
}
if ( reducer$stream !in reducer_store )
reducer_store[reducer$stream] = set();
add reducer_store[reducer$stream][reducer];
@ -313,9 +409,9 @@ function observe(id: string, key: Key, obs: Observation)
if ( r?$pred && ! r$pred(key, obs) )
next;
local ss = stats_store[r$sid];
local ss = stats_store[r$ssname];
# If there is a threshold and no epoch_finished callback
# If there is a threshold and no epoch_result callback
# we don't need to continue counting since the data will
# never be accessed. This was leading
# to some state management issues when measuring
@ -323,18 +419,21 @@ function observe(id: string, key: Key, obs: Observation)
# NOTE: this optimization could need removed in the
# future if on demand access is provided to the
# SumStats results.
if ( ! ss?$epoch_finished &&
r$sid in threshold_tracker &&
key in threshold_tracker[r$sid] &&
if ( ! ss?$epoch_result &&
r$ssname in threshold_tracker &&
( ss?$threshold &&
threshold_tracker[r$sid][key]$is_threshold_crossed ) ||
key in threshold_tracker[r$ssname] &&
threshold_tracker[r$ssname][key] != 0 ) ||
( ss?$threshold_series &&
threshold_tracker[r$sid][key]$threshold_series_index+1 == |ss$threshold_series| ) )
key in threshold_tracker[r$ssname] &&
threshold_tracker[r$ssname][key] == |ss$threshold_series| ) )
{
next;
}
if ( r$sid !in result_store )
result_store[ss$id] = table();
local results = result_store[r$sid];
if ( r$ssname !in result_store )
result_store[r$ssname] = table();
local results = result_store[r$ssname];
if ( key !in results )
results[key] = table();
@ -350,10 +449,13 @@ function observe(id: string, key: Key, obs: Observation)
# If a string was given, fall back to 1.0 as the value.
local val = 1.0;
if ( obs?$num || obs?$dbl )
val = obs?$dbl ? obs$dbl : obs$num;
if ( obs?$num )
val = obs$num;
else if ( obs?$dbl )
val = obs$dbl;
hook observe_hook(r, val, obs, result_val);
for ( i in r$calc_funcs )
calc_store[r$calc_funcs[i]](r, val, obs, result_val);
data_added(ss, key, result);
}
}
@ -362,10 +464,12 @@ function observe(id: string, key: Key, obs: Observation)
# mid-break-interval threshold crossing detection for cluster deployments.
function check_thresholds(ss: SumStat, key: Key, result: Result, modify_pct: double): bool
{
if ( ! (ss?$threshold || ss?$threshold_series) )
if ( ! (ss?$threshold || ss?$threshold_series || ss?$threshold_crossed) )
return F;
# Add in the extra ResultVals to make threshold_vals easier to write.
# This length comparison should work because we just need to make
# sure that we have the same number of reducers and results.
if ( |ss$reducers| != |result| )
{
for ( reducer in ss$reducers )
@ -378,28 +482,21 @@ function check_thresholds(ss: SumStat, key: Key, result: Result, modify_pct: dou
local watch = ss$threshold_val(key, result);
if ( modify_pct < 1.0 && modify_pct > 0.0 )
watch = double_to_count(floor(watch/modify_pct));
watch = watch/modify_pct;
if ( ss$id !in threshold_tracker )
threshold_tracker[ss$id] = table();
local t_tracker = threshold_tracker[ss$id];
local t_index = get_threshold_index(ss$name, key);
if ( key !in t_tracker )
{
local ttmp: Thresholding;
t_tracker[key] = ttmp;
}
local tt = t_tracker[key];
if ( ss?$threshold && ! tt$is_threshold_crossed && watch >= ss$threshold )
if ( ss?$threshold &&
t_index == 0 && # Check that the threshold hasn't already been crossed.
watch >= ss$threshold )
{
# Value crossed the threshold.
return T;
}
if ( ss?$threshold_series &&
|ss$threshold_series| >= tt$threshold_series_index &&
watch >= ss$threshold_series[tt$threshold_series_index] )
|ss$threshold_series| > t_index && # Check if there are more thresholds.
watch >= ss$threshold_series[t_index] )
{
# A threshold series was given and the value crossed the next
# value in the series.
@ -415,6 +512,8 @@ function threshold_crossed(ss: SumStat, key: Key, result: Result)
if ( ! ss?$threshold_crossed )
return;
increment_threshold_tracker(ss$name,key);
# Add in the extra ResultVals to make threshold_crossed callbacks easier to write.
if ( |ss$reducers| != |result| )
{
@ -426,11 +525,5 @@ function threshold_crossed(ss: SumStat, key: Key, result: Result)
}
ss$threshold_crossed(key, result);
local tt = threshold_tracker[ss$id][key];
tt$is_threshold_crossed = T;
# Bump up to the next threshold series index if a threshold series is being used.
if ( ss?$threshold_series )
++tt$threshold_series_index;
}

View file

@ -4,11 +4,20 @@ module SumStats;
event SumStats::finish_epoch(ss: SumStat)
{
if ( ss$id in result_store )
if ( ss$name in result_store )
{
local data = result_store[ss$id];
local now = network_time();
if ( ss?$epoch_result )
{
local data = result_store[ss$name];
# TODO: don't block here.
for ( key in data )
ss$epoch_result(now, key, data[key]);
}
if ( ss?$epoch_finished )
ss$epoch_finished(data);
ss$epoch_finished(now);
reset(ss);
}
@ -16,9 +25,32 @@ event SumStats::finish_epoch(ss: SumStat)
schedule ss$epoch { SumStats::finish_epoch(ss) };
}
function data_added(ss: SumStat, key: Key, result: Result)
{
if ( check_thresholds(ss, key, result, 1.0) )
threshold_crossed(ss, key, result);
}
function request(ss_name: string): ResultTable
{
# This only needs to be implemented this way for cluster compatibility.
return when ( T )
{
if ( ss_name in result_store )
return result_store[ss_name];
else
return table();
}
}
function request_key(ss_name: string, key: Key): Result
{
# This only needs to be implemented this way for cluster compatibility.
return when ( T )
{
if ( ss_name in result_store && key in result_store[ss_name] )
return result_store[ss_name][key];
else
return table();
}
}

View file

@ -1,4 +1,4 @@
@load base/frameworks/sumstats/main
@load ../main
module SumStats;
@ -14,17 +14,18 @@ export {
};
}
hook observe_hook(r: Reducer, val: double, obs: Observation, rv: ResultVal)
hook register_observe_plugins()
{
if ( AVERAGE in r$apply )
register_observe_plugin(AVERAGE, function(r: Reducer, val: double, obs: Observation, rv: ResultVal)
{
if ( ! rv?$average )
rv$average = val;
else
rv$average += (val - rv$average) / rv$num;
}
});
}
hook compose_resultvals_hook(result: ResultVal, rv1: ResultVal, rv2: ResultVal)
{
if ( rv1?$average && rv2?$average )

View file

@ -33,16 +33,20 @@ function get_last(rv: ResultVal): vector of Observation
return s;
}
hook observe_hook(r: Reducer, val: double, obs: Observation, rv: ResultVal)
hook register_observe_plugins()
{
if ( LAST in r$apply && r$num_last_elements > 0 )
register_observe_plugin(LAST, function(r: Reducer, val: double, obs: Observation, rv: ResultVal)
{
if ( ! rv?$last_elements )
rv$last_elements = Queue::init([$max_len=r$num_last_elements]);
Queue::put(rv$last_elements, obs);
}
if ( r$num_last_elements > 0 )
{
if ( ! rv?$last_elements )
rv$last_elements = Queue::init([$max_len=r$num_last_elements]);
Queue::put(rv$last_elements, obs);
}
});
}
hook compose_resultvals_hook(result: ResultVal, rv1: ResultVal, rv2: ResultVal)
{
# Merge $samples

View file

@ -1,4 +1,4 @@
@load base/frameworks/sumstats/main
@load ../main
module SumStats;
@ -14,15 +14,15 @@ export {
};
}
hook observe_hook(r: Reducer, val: double, obs: Observation, rv: ResultVal)
hook register_observe_plugins()
{
if ( MAX in r$apply )
register_observe_plugin(MAX, function(r: Reducer, val: double, obs: Observation, rv: ResultVal)
{
if ( ! rv?$max )
rv$max = val;
else if ( val > rv$max )
rv$max = val;
}
});
}
hook compose_resultvals_hook(result: ResultVal, rv1: ResultVal, rv2: ResultVal)

View file

@ -1,4 +1,4 @@
@load base/frameworks/sumstats/main
@load ../main
module SumStats;
@ -14,17 +14,18 @@ export {
};
}
hook observe_hook(r: Reducer, val: double, obs: Observation, rv: ResultVal)
hook register_observe_plugins()
{
if ( MIN in r$apply )
register_observe_plugin(MIN, function(r: Reducer, val: double, obs: Observation, rv: ResultVal)
{
if ( ! rv?$min )
rv$min = val;
else if ( val < rv$min )
rv$min = val;
}
});
}
hook compose_resultvals_hook(result: ResultVal, rv1: ResultVal, rv2: ResultVal)
{
if ( rv1?$min && rv2?$min )

View file

@ -47,15 +47,14 @@ function sample_add_sample(obs:Observation, rv: ResultVal)
if ( ra < rv$num_samples )
rv$samples[ra] = obs;
}
}
hook observe_hook(r: Reducer, val: double, obs: Observation, rv: ResultVal)
hook register_observe_plugins()
{
if ( SAMPLE in r$apply )
register_observe_plugin(SAMPLE, function(r: Reducer, val: double, obs: Observation, rv: ResultVal)
{
sample_add_sample(obs, rv);
}
});
}
hook compose_resultvals_hook(result: ResultVal, rv1: ResultVal, rv2: ResultVal)
@ -75,7 +74,6 @@ hook compose_resultvals_hook(result: ResultVal, rv1: ResultVal, rv2: ResultVal)
return;
}
if ( |rv1$samples| != num_samples && |rv2$samples| < num_samples )
{
if ( |rv1$samples| != rv1$sample_elements || |rv2$samples| < rv2$sample_elements )

View file

@ -1,5 +1,5 @@
@load base/frameworks/sumstats/main
@load ./variance
@load ../main
module SumStats;
@ -21,11 +21,18 @@ function calc_std_dev(rv: ResultVal)
rv$std_dev = sqrt(rv$variance);
}
# This depends on the variance plugin which uses priority -5
hook observe_hook(r: Reducer, val: double, obs: Observation, rv: ResultVal) &priority=-10
hook std_dev_hook(r: Reducer, val: double, obs: Observation, rv: ResultVal)
{
if ( STD_DEV in r$apply )
calc_std_dev(rv);
}
hook register_observe_plugins() &priority=-10
{
register_observe_plugin(STD_DEV, function(r: Reducer, val: double, obs: Observation, rv: ResultVal)
{
calc_std_dev(rv);
});
add_observe_plugin_dependency(STD_DEV, VARIANCE);
}
hook compose_resultvals_hook(result: ResultVal, rv1: ResultVal, rv2: ResultVal) &priority=-10

View file

@ -1,4 +1,4 @@
@load base/frameworks/sumstats/main
@load ../main
module SumStats;
@ -14,19 +14,19 @@ export {
sum: double &default=0.0;
};
type threshold_function: function(key: SumStats::Key, result: SumStats::Result): count;
global sum_threshold: function(data_id: string): threshold_function;
#type threshold_function: function(key: SumStats::Key, result: SumStats::Result): count;
#global sum_threshold: function(data_id: string): threshold_function;
}
function sum_threshold(data_id: string): threshold_function
{
return function(key: SumStats::Key, result: SumStats::Result): count
{
print fmt("data_id: %s", data_id);
print result;
return double_to_count(result[data_id]$sum);
};
}
#function sum_threshold(data_id: string): threshold_function
# {
# return function(key: SumStats::Key, result: SumStats::Result): count
# {
# print fmt("data_id: %s", data_id);
# print result;
# return double_to_count(result[data_id]$sum);
# };
# }
hook init_resultval_hook(r: Reducer, rv: ResultVal)
{
@ -34,10 +34,12 @@ hook init_resultval_hook(r: Reducer, rv: ResultVal)
rv$sum = 0;
}
hook observe_hook(r: Reducer, val: double, obs: Observation, rv: ResultVal)
hook register_observe_plugins()
{
if ( SUM in r$apply )
register_observe_plugin(SUM, function(r: Reducer, val: double, obs: Observation, rv: ResultVal)
{
rv$sum += val;
});
}
hook compose_resultvals_hook(result: ResultVal, rv1: ResultVal, rv2: ResultVal)

View file

@ -18,18 +18,20 @@ export {
}
hook register_observe_plugins()
{
register_observe_plugin(TOPK, function(r: Reducer, val: double, obs: Observation, rv: ResultVal)
{
topk_add(rv$topk, obs);
});
}
hook init_resultval_hook(r: Reducer, rv: ResultVal)
{
if ( TOPK in r$apply && ! rv?$topk )
rv$topk = topk_init(r$topk_size);
}
hook observe_hook(r: Reducer, val: double, obs: Observation, rv: ResultVal)
{
if ( TOPK in r$apply )
topk_add(rv$topk, obs);
}
hook compose_resultvals_hook(result: ResultVal, rv1: ResultVal, rv2: ResultVal)
{
if ( rv1?$topk )

View file

@ -1,4 +1,4 @@
@load base/frameworks/sumstats/main
@load ../main
module SumStats;
@ -23,15 +23,15 @@ redef record ResultVal += {
unique_vals: set[Observation] &optional;
};
hook observe_hook(r: Reducer, val: double, obs: Observation, rv: ResultVal)
hook register_observe_plugins()
{
if ( UNIQUE in r$apply )
register_observe_plugin(UNIQUE, function(r: Reducer, val: double, obs: Observation, rv: ResultVal)
{
if ( ! rv?$unique_vals )
rv$unique_vals=set();
add rv$unique_vals[obs];
rv$unique = |rv$unique_vals|;
}
});
}
hook compose_resultvals_hook(result: ResultVal, rv1: ResultVal, rv2: ResultVal)

View file

@ -1,5 +1,5 @@
@load base/frameworks/sumstats/main
@load ./average
@load ../main
module SumStats;
@ -28,17 +28,17 @@ function calc_variance(rv: ResultVal)
rv$variance = (rv$num > 1) ? rv$var_s/(rv$num-1) : 0.0;
}
# Reduced priority since this depends on the average
hook observe_hook(r: Reducer, val: double, obs: Observation, rv: ResultVal) &priority=-5
hook register_observe_plugins() &priority=-5
{
if ( VARIANCE in r$apply )
register_observe_plugin(VARIANCE, function(r: Reducer, val: double, obs: Observation, rv: ResultVal)
{
if ( rv$num > 1 )
rv$var_s += ((val - rv$prev_avg) * (val - rv$average));
calc_variance(rv);
rv$prev_avg = rv$average;
}
});
add_observe_plugin_dependency(VARIANCE, AVERAGE);
}
# Reduced priority since this depends on the average

View file

@ -39,6 +39,7 @@
@load base/frameworks/tunnels
@load base/protocols/conn
@load base/protocols/dhcp
@load base/protocols/dnp3
@load base/protocols/dns
@load base/protocols/ftp

View file

@ -0,0 +1,4 @@
@load ./consts
@load ./main
@load-sigs ./dpd.sig

View file

@ -0,0 +1,20 @@
##! Types, errors, and fields for analyzing DHCP data. A helper file
##! for DHCP analysis scripts.
module DHCP;
export {
## Types of DHCP messages. See RFC 1533.
const message_types = {
[1] = "DHCP_DISCOVER",
[2] = "DHCP_OFFER",
[3] = "DHCP_REQUEST",
[4] = "DHCP_DECLINE",
[5] = "DHCP_ACK",
[6] = "DHCP_NAK",
[7] = "DHCP_RELEASE",
[8] = "DHCP_INFORM",
} &default = function(n: count): string { return fmt("unknown-message-type-%d", n); };
}

View file

@ -0,0 +1,5 @@
signature dhcp_cookie {
ip-proto == udp
payload /^.*\x63\x82\x53\x63/
enable "dhcp"
}

View file

@ -0,0 +1,75 @@
##! Analyzes DHCP traffic in order to log DHCP leases given to clients.
##! This script ignores large swaths of the protocol, since it is rather
##! noisy on most networks, and focuses on the end-result: assigned leases.
##!
##! If you'd like to track known DHCP devices and to log the hostname
##! supplied by the client, see policy/protocols/dhcp/known-devices.bro
@load ./utils.bro
module DHCP;
export {
redef enum Log::ID += { LOG };
## The record type which contains the column fields of the DHCP log.
type Info: record {
## The earliest time at which a DHCP message over the
## associated connection is observed.
ts: time &log;
## A unique identifier of the connection over which DHCP is
## occuring.
uid: string &log;
## The connection's 4-tuple of endpoint addresses/ports.
id: conn_id &log;
## Client's hardware address.
mac: string &log &optional;
## Client's actual assigned IP address.
assigned_ip: addr &log &optional;
## IP address lease interval.
lease_time: interval &log &optional;
## A random number choosen by the client for this transaction.
trans_id: count &log;
};
## Event that can be handled to access the DHCP
## record as it is sent on to the logging framework.
global log_dhcp: event(rec: Info);
}
# Add the dhcp info to the connection record
redef record connection += {
dhcp: Info &optional;
};
# 67/udp is the server's port, 68/udp the client.
const ports = { 67/udp, 68/udp };
redef likely_server_ports += { 67/udp };
event bro_init()
{
Log::create_stream(DHCP::LOG, [$columns=Info, $ev=log_dhcp]);
Analyzer::register_for_ports(Analyzer::ANALYZER_DHCP, ports);
}
event dhcp_ack(c: connection, msg: dhcp_msg, mask: addr, router: dhcp_router_list, lease: interval, serv_addr: addr, host_name: string)
{
local info: Info;
info$ts = network_time();
info$id = c$id;
info$uid = c$uid;
info$lease_time = lease;
info$trans_id = msg$xid;
if ( msg$h_addr != "" )
info$mac = msg$h_addr;
if ( reverse_ip(msg$yiaddr) != 0.0.0.0 )
info$assigned_ip = reverse_ip(msg$yiaddr);
else
info$assigned_ip = c$id$orig_h;
c$dhcp = info;
Log::write(DHCP::LOG, c$dhcp);
}

View file

@ -0,0 +1,21 @@
##! Utilities specific for DHCP processing.
@load ./main
module DHCP;
export {
## Reverse the octets of an IPv4 IP.
##
## ip: An :bro:type:`addr` IPv4 address.
##
## Returns: A reversed addr.
global reverse_ip: function(ip: addr): addr;
}
function reverse_ip(ip: addr): addr
{
local octets = split(cat(ip), /\./);
return to_addr(cat(octets[4], ".", octets[3], ".", octets[2], ".", octets[1]));
}

View file

@ -137,8 +137,9 @@ function log_record(info: Info)
}
timeout 15secs
{
Reporter::info(fmt("SSL delay tokens not released in time (%s tokens remaining)",
|info$delay_tokens|));
# We are just going to log the record anyway.
delete info$delay_tokens;
log_record(info);
}
}
}

View file

@ -28,7 +28,7 @@ event Dir::monitor_ev(dir: string, last_files: set[string],
callback: function(fname: string),
poll_interval: interval)
{
when ( local result = Exec::run([$cmd=fmt("ls -i \"%s/\"", str_shell_escape(dir))]) )
when ( local result = Exec::run([$cmd=fmt("ls -i -1 \"%s/\"", str_shell_escape(dir))]) )
{
if ( result$exit_code != 0 )
{

View file

@ -163,6 +163,7 @@ function run(cmd: Command): Result
Input::add_event([$name=cmd$uid,
$source=fmt("%s |", cmd$cmd),
$reader=Input::READER_RAW,
$mode=Input::STREAM,
$fields=Exec::OneLine,
$ev=Exec::line,
$want_record=F,

View file

@ -1,109 +0,0 @@
@load base/protocols/http
@load base/protocols/ssl
@load base/frameworks/sumstats
module AppStats;
export {
redef enum Log::ID += { LOG };
type Info: record {
## Timestamp when the log line was finished and written.
ts: time &log;
## Time interval that the log line covers.
ts_delta: interval &log;
## The name of the "app", like "facebook" or "netflix".
app: string &log;
## The number of unique local hosts using the app.
uniq_hosts: count &log;
## The number of hits to the app in total.
hits: count &log;
## The total number of bytes received by users of the app.
bytes: count &log;
};
## The frequency of logging the stats collected by this script.
const break_interval = 15mins &redef;
}
redef record connection += {
resp_hostname: string &optional;
};
event bro_init() &priority=3
{
Log::create_stream(AppStats::LOG, [$columns=Info]);
local r1: SumStats::Reducer = [$stream="apps.bytes", $apply=set(SumStats::SUM)];
local r2: SumStats::Reducer = [$stream="apps.hits", $apply=set(SumStats::UNIQUE)];
SumStats::create([$epoch=break_interval,
$reducers=set(r1, r2),
$epoch_finished(data: SumStats::ResultTable) =
{
local l: Info;
l$ts = network_time();
l$ts_delta = break_interval;
for ( key in data )
{
local result = data[key];
l$app = key$str;
l$bytes = double_to_count(floor(result["apps.bytes"]$sum));
l$hits = result["apps.hits"]$num;
l$uniq_hosts = result["apps.hits"]$unique;
Log::write(LOG, l);
}
}]);
}
function add_sumstats(id: conn_id, hostname: string, size: count)
{
if ( /\.youtube\.com$/ in hostname && size > 512*1024 )
{
SumStats::observe("apps.bytes", [$str="youtube"], [$num=size]);
SumStats::observe("apps.hits", [$str="youtube"], [$str=cat(id$orig_h)]);
}
else if ( /(\.facebook\.com|\.fbcdn\.net)$/ in hostname && size > 20 )
{
SumStats::observe("apps.bytes", [$str="facebook"], [$num=size]);
SumStats::observe("apps.hits", [$str="facebook"], [$str=cat(id$orig_h)]);
}
else if ( /\.google\.com$/ in hostname && size > 20 )
{
SumStats::observe("apps.bytes", [$str="google"], [$num=size]);
SumStats::observe("apps.hits", [$str="google"], [$str=cat(id$orig_h)]);
}
else if ( /\.nflximg\.com$/ in hostname && size > 200*1024 )
{
SumStats::observe("apps.bytes", [$str="netflix"], [$num=size]);
SumStats::observe("apps.hits", [$str="netflix"], [$str=cat(id$orig_h)]);
}
else if ( /\.(pandora|p-cdn)\.com$/ in hostname && size > 512*1024 )
{
SumStats::observe("apps.bytes", [$str="pandora"], [$num=size]);
SumStats::observe("apps.hits", [$str="pandora"], [$str=cat(id$orig_h)]);
}
else if ( /\.gmail\.com$/ in hostname && size > 20 )
{
SumStats::observe("apps.bytes", [$str="gmail"], [$num=size]);
SumStats::observe("apps.hits", [$str="gmail"], [$str=cat(id$orig_h)]);
}
}
event ssl_established(c: connection)
{
if ( c?$ssl && c$ssl?$server_name )
c$resp_hostname = c$ssl$server_name;
}
event connection_finished(c: connection)
{
if ( c?$resp_hostname )
add_sumstats(c$id, c$resp_hostname, c$resp$size);
}
event HTTP::log_http(rec: HTTP::Info)
{
if( rec?$host )
add_sumstats(rec$id, rec$host, rec$response_body_len);
}

View file

@ -0,0 +1,2 @@
@load ./main
@load ./plugins

View file

@ -0,0 +1,77 @@
#! AppStats collects information about web applications in use
#! on the network.
@load base/protocols/http
@load base/protocols/ssl
@load base/frameworks/sumstats
module AppStats;
export {
redef enum Log::ID += { LOG };
type Info: record {
## Timestamp when the log line was finished and written.
ts: time &log;
## Time interval that the log line covers.
ts_delta: interval &log;
## The name of the "app", like "facebook" or "netflix".
app: string &log;
## The number of unique local hosts using the app.
uniq_hosts: count &log;
## The number of hits to the app in total.
hits: count &log;
## The total number of bytes received by users of the app.
bytes: count &log;
};
## The frequency of logging the stats collected by this script.
const break_interval = 15mins &redef;
}
redef record connection += {
resp_hostname: string &optional;
};
global add_sumstats: hook(id: conn_id, hostname: string, size: count);
event bro_init() &priority=3
{
Log::create_stream(AppStats::LOG, [$columns=Info]);
local r1: SumStats::Reducer = [$stream="apps.bytes", $apply=set(SumStats::SUM)];
local r2: SumStats::Reducer = [$stream="apps.hits", $apply=set(SumStats::UNIQUE)];
SumStats::create([$name="app-metrics",
$epoch=break_interval,
$reducers=set(r1, r2),
$epoch_result(ts: time, key: SumStats::Key, result: SumStats::Result) =
{
local l: Info;
l$ts = network_time();
l$ts_delta = break_interval;
l$app = key$str;
l$bytes = double_to_count(floor(result["apps.bytes"]$sum));
l$hits = result["apps.hits"]$num;
l$uniq_hosts = result["apps.hits"]$unique;
Log::write(LOG, l);
}]);
}
event ssl_established(c: connection)
{
if ( c?$ssl && c$ssl?$server_name )
c$resp_hostname = c$ssl$server_name;
}
event connection_finished(c: connection)
{
if ( c?$resp_hostname )
hook add_sumstats(c$id, c$resp_hostname, c$resp$size);
}
event HTTP::log_http(rec: HTTP::Info)
{
if( rec?$host )
hook add_sumstats(rec$id, rec$host, rec$response_body_len);
}

View file

@ -0,0 +1,6 @@
@load ./facebook
@load ./gmail
@load ./google
@load ./netflix
@load ./pandora
@load ./youtube

View file

@ -0,0 +1,12 @@
@load ../main
module AppStats;
hook add_sumstats(id: conn_id, hostname: string, size: count)
{
if ( /\.(facebook\.com|fbcdn\.net)$/ in hostname && size > 20 )
{
SumStats::observe("apps.bytes", [$str="facebook"], [$num=size]);
SumStats::observe("apps.hits", [$str="facebook"], [$str=cat(id$orig_h)]);
}
}

View file

@ -0,0 +1,12 @@
@load ../main
module AppStats;
hook add_sumstats(id: conn_id, hostname: string, size: count)
{
if ( /\.gmail\.com$/ in hostname && size > 20 )
{
SumStats::observe("apps.bytes", [$str="gmail"], [$num=size]);
SumStats::observe("apps.hits", [$str="gmail"], [$str=cat(id$orig_h)]);
}
}

View file

@ -0,0 +1,12 @@
@load ../main
module AppStats;
hook add_sumstats(id: conn_id, hostname: string, size: count)
{
if ( /\.google\.com$/ in hostname && size > 20 )
{
SumStats::observe("apps.bytes", [$str="google"], [$num=size]);
SumStats::observe("apps.hits", [$str="google"], [$str=cat(id$orig_h)]);
}
}

View file

@ -0,0 +1,12 @@
@load ../main
module AppStats;
hook add_sumstats(id: conn_id, hostname: string, size: count)
{
if ( /\.nflximg\.com$/ in hostname && size > 200*1024 )
{
SumStats::observe("apps.bytes", [$str="netflix"], [$num=size]);
SumStats::observe("apps.hits", [$str="netflix"], [$str=cat(id$orig_h)]);
}
}

View file

@ -0,0 +1,12 @@
@load ../main
module AppStats;
hook add_sumstats(id: conn_id, hostname: string, size: count)
{
if ( /\.(pandora|p-cdn)\.com$/ in hostname && size > 512*1024 )
{
SumStats::observe("apps.bytes", [$str="pandora"], [$num=size]);
SumStats::observe("apps.hits", [$str="pandora"], [$str=cat(id$orig_h)]);
}
}

View file

@ -0,0 +1,12 @@
@load ../main
module AppStats;
hook add_sumstats(id: conn_id, hostname: string, size: count)
{
if ( /\.youtube\.com$/ in hostname && size > 512*1024 )
{
SumStats::observe("apps.bytes", [$str="youtube"], [$num=size]);
SumStats::observe("apps.hits", [$str="youtube"], [$str=cat(id$orig_h)]);
}
}

View file

@ -29,7 +29,7 @@ export {
## Defines the threshold for ICMP Time Exceeded messages for a src-dst pair.
## This threshold only comes into play after a host is found to be
## sending low ttl packets.
const icmp_time_exceeded_threshold = 3 &redef;
const icmp_time_exceeded_threshold: double = 3 &redef;
## Interval at which to watch for the
## :bro:id:`Traceroute::icmp_time_exceeded_threshold` variable to be
@ -57,16 +57,17 @@ event bro_init() &priority=5
local r1: SumStats::Reducer = [$stream="traceroute.time_exceeded", $apply=set(SumStats::UNIQUE)];
local r2: SumStats::Reducer = [$stream="traceroute.low_ttl_packet", $apply=set(SumStats::SUM)];
SumStats::create([$epoch=icmp_time_exceeded_interval,
SumStats::create([$name="traceroute-detection",
$epoch=icmp_time_exceeded_interval,
$reducers=set(r1, r2),
$threshold_val(key: SumStats::Key, result: SumStats::Result) =
{
# Give a threshold value of zero depending on if the host
# sends a low ttl packet.
if ( require_low_ttl_packets && result["traceroute.low_ttl_packet"]$sum == 0 )
return 0;
return 0.0;
else
return result["traceroute.time_exceeded"]$unique;
return result["traceroute.time_exceeded"]$unique+0;
},
$threshold=icmp_time_exceeded_threshold,
$threshold_crossed(key: SumStats::Key, result: SumStats::Result) =

View file

@ -0,0 +1,41 @@
##! This script provides infrastructure for logging devices for which Bro has been
##! able to determine the MAC address, and it logs them once per day (by default).
##! The log that is output provides an easy way to determine a count of the devices
##! in use on a network per day.
##!
##! ..note::
##!
##! This script will not generate any logs on its own, it needs to be
##! supplied with information from elsewhere, such as
##! :doc:`policy/protocols/dhcp/known-devices-and-hostnames/scripts/.
module Known;
export {
## The known-hosts logging stream identifier.
redef enum Log::ID += { DEVICES_LOG };
## The record type which contains the column fields of the known-devices log.
type DevicesInfo: record {
## The timestamp at which the host was detected.
ts: time &log;
## The MAC address that was detected.
mac: string &log;
};
## The set of all known MAC addresses. It can accessed from other
## to add, and check for, addresses seen in use.
##
## We maintain each entry for 24 hours by default so that the existence of
## individual addressed is logged each day.
global known_devices: set[string] &create_expire=1day &synchronized &redef;
## An event that can be handled to access the :bro:type:`Known::DevicesInfo`
## record as it is sent on to the logging framework.
global log_known_devices: event(rec: DevicesInfo);
}
event bro_init()
{
Log::create_stream(Known::DEVICES_LOG, [$columns=DevicesInfo, $ev=log_known_devices]);
}

View file

@ -40,15 +40,11 @@ export {
## The threshold of a unique number of hosts a scanning host has to have failed
## connections with on a single port.
const addr_scan_threshold = 25 &redef;
const addr_scan_threshold = 25.0 &redef;
## The threshold of a number of unique ports a scanning host has to have failed
## connections with on a single victim host.
const port_scan_threshold = 15 &redef;
## Custom thresholds based on service for address scan. This is primarily
## useful for setting reduced thresholds for specific ports.
const addr_scan_custom_thresholds: table[port] of count &redef;
const port_scan_threshold = 15.0 &redef;
global Scan::addr_scan_policy: hook(scanner: addr, victim: addr, scanned_port: port);
global Scan::port_scan_policy: hook(scanner: addr, victim: addr, scanned_port: port);
@ -57,11 +53,12 @@ export {
event bro_init() &priority=5
{
local r1: SumStats::Reducer = [$stream="scan.addr.fail", $apply=set(SumStats::UNIQUE)];
SumStats::create([$epoch=addr_scan_interval,
SumStats::create([$name="addr-scan",
$epoch=addr_scan_interval,
$reducers=set(r1),
$threshold_val(key: SumStats::Key, result: SumStats::Result) =
{
return double_to_count(result["scan.addr.fail"]$unique);
return result["scan.addr.fail"]$unique+0.0;
},
#$threshold_func=check_addr_scan_threshold,
$threshold=addr_scan_threshold,
@ -81,11 +78,12 @@ event bro_init() &priority=5
# Note: port scans are tracked similar to: table[src_ip, dst_ip] of set(port);
local r2: SumStats::Reducer = [$stream="scan.port.fail", $apply=set(SumStats::UNIQUE)];
SumStats::create([$epoch=port_scan_interval,
SumStats::create([$name="port-scan",
$epoch=port_scan_interval,
$reducers=set(r2),
$threshold_val(key: SumStats::Key, result: SumStats::Result) =
{
return double_to_count(result["scan.port.fail"]$unique);
return result["scan.port.fail"]$unique+0.0;
},
$threshold=port_scan_threshold,
$threshold_crossed(key: SumStats::Key, result: SumStats::Result) =

View file

@ -0,0 +1,37 @@
##! Tracks MAC address with hostnames seen in DHCP traffic. They are logged into
##! ``devices.log``.
@load policy/misc/known-devices
module Known;
export {
redef record DevicesInfo += {
## The value of the DHCP host name option, if seen
dhcp_host_name: string &log &optional;
};
}
event dhcp_request(c: connection, msg: dhcp_msg, req_addr: addr, serv_addr: addr, host_name: string)
{
if ( msg$h_addr == "" )
return;
if ( msg$h_addr !in known_devices )
{
add known_devices[msg$h_addr];
Log::write(Known::DEVICES_LOG, [$ts=network_time(), $mac=msg$h_addr, $dhcp_host_name=host_name]);
}
}
event dhcp_inform(c: connection, msg: dhcp_msg, host_name: string)
{
if ( msg$h_addr == "" )
return;
if ( msg$h_addr !in known_devices )
{
add known_devices[msg$h_addr];
Log::write(Known::DEVICES_LOG, [$ts=network_time(), $mac=msg$h_addr, $dhcp_host_name=host_name]);
}
}

View file

@ -17,7 +17,7 @@ export {
## How many rejected usernames or passwords are required before being
## considered to be bruteforcing.
const bruteforce_threshold = 20 &redef;
const bruteforce_threshold: double = 20 &redef;
## The time period in which the threshold needs to be crossed before
## being reset.
@ -28,11 +28,12 @@ export {
event bro_init()
{
local r1: SumStats::Reducer = [$stream="ftp.failed_auth", $apply=set(SumStats::UNIQUE)];
SumStats::create([$epoch=bruteforce_measurement_interval,
SumStats::create([$name="ftp-detect-bruteforcing",
$epoch=bruteforce_measurement_interval,
$reducers=set(r1),
$threshold_val(key: SumStats::Key, result: SumStats::Result) =
{
return result["ftp.failed_auth"]$num;
return result["ftp.failed_auth"]$num+0.0;
},
$threshold=bruteforce_threshold,
$threshold_crossed(key: SumStats::Key, result: SumStats::Result) =

View file

@ -28,7 +28,7 @@ export {
## Defines the threshold that determines if an SQL injection attack
## is ongoing based on the number of requests that appear to be SQL
## injection attacks.
const sqli_requests_threshold = 50 &redef;
const sqli_requests_threshold: double = 50.0 &redef;
## Interval at which to watch for the
## :bro:id:`HTTP::sqli_requests_threshold` variable to be crossed.
@ -64,11 +64,12 @@ event bro_init() &priority=3
# determine when it looks like an actual attack and how to respond when
# thresholds are crossed.
local r1: SumStats::Reducer = [$stream="http.sqli.attacker", $apply=set(SumStats::SUM, SumStats::SAMPLE), $num_samples=collect_SQLi_samples];
SumStats::create([$epoch=sqli_requests_interval,
SumStats::create([$name="detect-sqli-attackers",
$epoch=sqli_requests_interval,
$reducers=set(r1),
$threshold_val(key: SumStats::Key, result: SumStats::Result) =
{
return double_to_count(result["http.sqli.attacker"]$sum);
return result["http.sqli.attacker"]$sum;
},
$threshold=sqli_requests_threshold,
$threshold_crossed(key: SumStats::Key, result: SumStats::Result) =
@ -82,11 +83,12 @@ event bro_init() &priority=3
}]);
local r2: SumStats::Reducer = [$stream="http.sqli.victim", $apply=set(SumStats::SUM, SumStats::SAMPLE), $num_samples=collect_SQLi_samples];
SumStats::create([$epoch=sqli_requests_interval,
SumStats::create([$name="detect-sqli-victims",
$epoch=sqli_requests_interval,
$reducers=set(r2),
$threshold_val(key: SumStats::Key, result: SumStats::Result) =
{
return double_to_count(result["http.sqli.victim"]$sum);
return result["http.sqli.victim"]$sum;
},
$threshold=sqli_requests_threshold,
$threshold_crossed(key: SumStats::Key, result: SumStats::Result) =

View file

@ -27,7 +27,7 @@ export {
## The number of failed SSH connections before a host is designated as
## guessing passwords.
const password_guesses_limit = 30 &redef;
const password_guesses_limit: double = 30 &redef;
## The amount of time to remember presumed non-successful logins to build
## model of a password guesser.
@ -42,20 +42,29 @@ export {
event bro_init()
{
local r1: SumStats::Reducer = [$stream="ssh.login.failure", $apply=set(SumStats::SUM)];
SumStats::create([$epoch=guessing_timeout,
local r1: SumStats::Reducer = [$stream="ssh.login.failure", $apply=set(SumStats::SUM, SumStats::SAMPLE), $num_samples=5];
SumStats::create([$name="detect-ssh-bruteforcing",
$epoch=guessing_timeout,
$reducers=set(r1),
$threshold_val(key: SumStats::Key, result: SumStats::Result) =
{
return double_to_count(result["ssh.login.failure"]$sum);
return result["ssh.login.failure"]$sum;
},
$threshold=password_guesses_limit,
$threshold_crossed(key: SumStats::Key, result: SumStats::Result) =
{
local r = result["ssh.login.failure"];
local sub_msg = fmt("Sampled servers: ");
local samples = r$samples;
for ( i in samples )
{
if ( samples[i]?$str )
sub_msg = fmt("%s%s %s", sub_msg, i==0 ? "":",", samples[i]$str);
}
# Generate the notice.
NOTICE([$note=Password_Guessing,
$msg=fmt("%s appears to be guessing SSH passwords (seen in %d connections).", key$host, r$num),
$sub=sub_msg,
$src=key$host,
$identifier=cat(key$host)]);
}]);
@ -78,5 +87,5 @@ event SSH::heuristic_failed_login(c: connection)
# be ignored.
if ( ! (id$orig_h in ignore_guessers &&
id$resp_h in ignore_guessers[id$orig_h]) )
SumStats::observe("ssh.login.failure", [$host=id$orig_h], [$num=1]);
SumStats::observe("ssh.login.failure", [$host=id$orig_h], [$str=cat(id$resp_h)]);
}

View file

@ -11,6 +11,13 @@
# Load the scan detection script.
@load misc/scan
# Log some information about web applications being used by users
# on your network.
@load misc/app-stats
# Detect traceroute being run on the network.
@load misc/detect-traceroute
# Generate notices when vulnerable versions of software are discovered.
# The default is to only monitor software found in the address space defined
# as "local". Refer to the software framework's documentation for more

View file

@ -35,10 +35,19 @@
@load integration/barnyard2/types.bro
@load integration/collective-intel/__load__.bro
@load integration/collective-intel/main.bro
@load misc/app-metrics.bro
@load misc/app-stats/__load__.bro
@load misc/app-stats/main.bro
@load misc/app-stats/plugins/__load__.bro
@load misc/app-stats/plugins/facebook.bro
@load misc/app-stats/plugins/gmail.bro
@load misc/app-stats/plugins/google.bro
@load misc/app-stats/plugins/netflix.bro
@load misc/app-stats/plugins/pandora.bro
@load misc/app-stats/plugins/youtube.bro
@load misc/capture-loss.bro
@load misc/detect-traceroute/__load__.bro
@load misc/detect-traceroute/main.bro
@load misc/known-devices.bro
@load misc/load-balancing.bro
@load misc/loaded-scripts.bro
@load misc/profiling.bro
@ -48,6 +57,7 @@
@load protocols/conn/known-hosts.bro
@load protocols/conn/known-services.bro
@load protocols/conn/weirds.bro
@load protocols/dhcp/known-devices-and-hostnames.bro
@load protocols/dns/auth-addl.bro
@load protocols/dns/detect-external-names.bro
@load protocols/ftp/detect-bruteforcing.bro

View file

@ -1,4 +1,3 @@
#include "DHCP.h"
#include "events.bif.h"

View file

@ -8,12 +8,10 @@ flow DHCP_Flow(is_orig: bool) {
%member{
BroVal dhcp_msg_val_;
BroAnalyzer interp;
%}
%init{
dhcp_msg_val_ = 0;
interp = connection->bro_analyzer();
%}
%cleanup{
@ -45,7 +43,7 @@ flow DHCP_Flow(is_orig: bool) {
}
if ( type == 0 )
interp->Weird("DHCP_no_type_option");
connection()->bro_analyzer()->ProtocolViolation("no DHCP message type option");
return type;
%}
@ -56,54 +54,63 @@ flow DHCP_Flow(is_orig: bool) {
// Requested IP address to the server.
::uint32 req_addr = 0, serv_addr = 0;
StringVal* host_name = 0;
for ( ptr = options->begin();
ptr != options->end() && ! (*ptr)->last(); ++ptr )
for ( ptr = options->begin(); ptr != options->end() && ! (*ptr)->last(); ++ptr )
{
switch ( (*ptr)->code() ) {
case REQ_IP_OPTION:
req_addr = htonl((*ptr)->info()->req_addr());
break;
switch ( (*ptr)->code() )
{
case REQ_IP_OPTION:
req_addr = htonl((*ptr)->info()->req_addr());
break;
case SERV_ID_OPTION:
serv_addr = htonl((*ptr)->info()->serv_addr());
break;
}
case SERV_ID_OPTION:
serv_addr = htonl((*ptr)->info()->serv_addr());
break;
case HOST_NAME_OPTION:
host_name = new StringVal((*ptr)->info()->host_name().length(),
(const char*) (*ptr)->info()->host_name().begin());
break;
}
}
if ( host_name == 0 )
host_name = new StringVal("");
switch ( type )
{
case DHCPDISCOVER:
BifEvent::generate_dhcp_discover(connection()->bro_analyzer(),
connection()->bro_analyzer()->Conn(),
dhcp_msg_val_->Ref(), new AddrVal(req_addr));
break;
{
case DHCPDISCOVER:
BifEvent::generate_dhcp_discover(connection()->bro_analyzer(),
connection()->bro_analyzer()->Conn(),
dhcp_msg_val_->Ref(), new AddrVal(req_addr), host_name);
break;
case DHCPREQUEST:
BifEvent::generate_dhcp_request(connection()->bro_analyzer(),
connection()->bro_analyzer()->Conn(),
dhcp_msg_val_->Ref(), new AddrVal(req_addr),
new AddrVal(serv_addr));
break;
case DHCPREQUEST:
BifEvent::generate_dhcp_request(connection()->bro_analyzer(),
connection()->bro_analyzer()->Conn(),
dhcp_msg_val_->Ref(), new AddrVal(req_addr),
new AddrVal(serv_addr), host_name);
break;
case DHCPDECLINE:
BifEvent::generate_dhcp_decline(connection()->bro_analyzer(),
connection()->bro_analyzer()->Conn(),
dhcp_msg_val_->Ref());
break;
case DHCPDECLINE:
BifEvent::generate_dhcp_decline(connection()->bro_analyzer(),
connection()->bro_analyzer()->Conn(),
dhcp_msg_val_->Ref(), host_name);
break;
case DHCPRELEASE:
BifEvent::generate_dhcp_release(connection()->bro_analyzer(),
connection()->bro_analyzer()->Conn(),
dhcp_msg_val_->Ref());
break;
case DHCPRELEASE:
BifEvent::generate_dhcp_release(connection()->bro_analyzer(),
connection()->bro_analyzer()->Conn(),
dhcp_msg_val_->Ref(), host_name);
break;
case DHCPINFORM:
BifEvent::generate_dhcp_inform(connection()->bro_analyzer(),
connection()->bro_analyzer()->Conn(),
dhcp_msg_val_->Ref());
break;
}
case DHCPINFORM:
BifEvent::generate_dhcp_inform(connection()->bro_analyzer(),
connection()->bro_analyzer()->Conn(),
dhcp_msg_val_->Ref(), host_name);
break;
}
return true;
%}
@ -118,72 +125,83 @@ flow DHCP_Flow(is_orig: bool) {
::uint32 subnet_mask = 0, serv_addr = 0;
uint32 lease = 0;
StringVal* host_name = 0;
for ( ptr = options->begin();
ptr != options->end() && ! (*ptr)->last(); ++ptr )
{
switch ( (*ptr)->code() ) {
case SUBNET_OPTION:
subnet_mask = htonl((*ptr)->info()->mask());
break;
case ROUTER_OPTION:
// Let's hope there aren't multiple
// such options.
Unref(router_list);
router_list = new TableVal(dhcp_router_list);
switch ( (*ptr)->code() )
{
int num_routers =
(*ptr)->info()->router_list()->size();
case SUBNET_OPTION:
subnet_mask = htonl((*ptr)->info()->mask());
break;
for ( int i = 0; i < num_routers; ++i )
{
vector<uint32>* rlist =
(*ptr)->info()->router_list();
uint32 raddr = (*rlist)[i];
::uint32 tmp_addr;
tmp_addr = htonl(raddr);
// index starting from 1
Val* index = new Val(i + 1, TYPE_COUNT);
router_list->Assign(index, new AddrVal(tmp_addr));
Unref(index);
}
case ROUTER_OPTION:
// Let's hope there aren't multiple
// such options.
Unref(router_list);
router_list = new TableVal(dhcp_router_list);
{
int num_routers = (*ptr)->info()->router_list()->size();
for ( int i = 0; i < num_routers; ++i )
{
vector<uint32>* rlist = (*ptr)->info()->router_list();
uint32 raddr = (*rlist)[i];
::uint32 tmp_addr;
tmp_addr = htonl(raddr);
// index starting from 1
Val* index = new Val(i + 1, TYPE_COUNT);
router_list->Assign(index, new AddrVal(tmp_addr));
Unref(index);
}
}
break;
case LEASE_OPTION:
lease = (*ptr)->info()->lease();
break;
case SERV_ID_OPTION:
serv_addr = htonl((*ptr)->info()->serv_addr());
break;
case HOST_NAME_OPTION:
host_name = new StringVal((*ptr)->info()->host_name().length(),
(const char*) (*ptr)->info()->host_name().begin());
break;
}
break;
case LEASE_OPTION:
lease = (*ptr)->info()->lease();
break;
case SERV_ID_OPTION:
serv_addr = htonl((*ptr)->info()->serv_addr());
break;
}
}
switch ( type ) {
case DHCPOFFER:
BifEvent::generate_dhcp_offer(connection()->bro_analyzer(),
connection()->bro_analyzer()->Conn(),
dhcp_msg_val_->Ref(), new AddrVal(subnet_mask),
router_list, lease, new AddrVal(serv_addr));
break;
if ( host_name == 0 )
host_name = new StringVal("");
case DHCPACK:
BifEvent::generate_dhcp_ack(connection()->bro_analyzer(),
connection()->bro_analyzer()->Conn(),
dhcp_msg_val_->Ref(), new AddrVal(subnet_mask),
router_list, lease, new AddrVal(serv_addr));
break;
switch ( type )
{
case DHCPOFFER:
BifEvent::generate_dhcp_offer(connection()->bro_analyzer(),
connection()->bro_analyzer()->Conn(),
dhcp_msg_val_->Ref(), new AddrVal(subnet_mask),
router_list, lease, new AddrVal(serv_addr), host_name);
break;
case DHCPNAK:
BifEvent::generate_dhcp_nak(connection()->bro_analyzer(),
connection()->bro_analyzer()->Conn(),
dhcp_msg_val_->Ref());
break;
case DHCPACK:
BifEvent::generate_dhcp_ack(connection()->bro_analyzer(),
connection()->bro_analyzer()->Conn(),
dhcp_msg_val_->Ref(), new AddrVal(subnet_mask),
router_list, lease, new AddrVal(serv_addr), host_name);
break;
}
case DHCPNAK:
BifEvent::generate_dhcp_nak(connection()->bro_analyzer(),
connection()->bro_analyzer()->Conn(),
dhcp_msg_val_->Ref(), host_name);
break;
}
return true;
@ -195,48 +213,59 @@ flow DHCP_Flow(is_orig: bool) {
// DHCP or BOOTP. If not, we are unable to interpret
// the message options.
if ( ${msg.cookie} != 0x63825363 )
{
connection()->bro_analyzer()->ProtocolViolation(fmt("bad cookie (%d)", ${msg.cookie}));
return false;
}
Unref(dhcp_msg_val_);
RecordVal* r = new RecordVal(dhcp_msg);
const char* mac_str = fmt_mac(${msg.chaddr}.data(), ${msg.chaddr}.length());
RecordVal* r = new RecordVal(dhcp_msg);
r->Assign(0, new Val(${msg.op}, TYPE_COUNT));
r->Assign(1, new Val(${msg.type}, TYPE_COUNT));
r->Assign(2, new Val(${msg.xid}, TYPE_COUNT));
// We want only 6 bytes for Ethernet address.
r->Assign(3, new StringVal(6, (const char*) ${msg.chaddr}.begin()));
r->Assign(3, new StringVal(mac_str));
r->Assign(4, new AddrVal(${msg.ciaddr}));
r->Assign(5, new AddrVal(${msg.yiaddr}));
delete [] mac_str;
dhcp_msg_val_ = r;
switch ( ${msg.op} ) {
case BOOTREQUEST: // presumablye from client to server
if ( ${msg.type} == DHCPDISCOVER ||
${msg.type} == DHCPREQUEST ||
${msg.type} == DHCPDECLINE ||
${msg.type} == DHCPRELEASE ||
${msg.type} == DHCPINFORM )
parse_request(${msg.options}, ${msg.type});
else
interp->Weird("DHCP_wrong_msg_type");
break;
switch ( ${msg.op} )
{
case BOOTREQUEST: // presumably from client to server
if ( ${msg.type} == DHCPDISCOVER ||
${msg.type} == DHCPREQUEST ||
${msg.type} == DHCPDECLINE ||
${msg.type} == DHCPRELEASE ||
${msg.type} == DHCPINFORM )
parse_request(${msg.options}, ${msg.type});
else
connection()->bro_analyzer()->ProtocolViolation(fmt("unknown DHCP message type option for BOOTREQUEST (%d)",
${msg.type}));
break;
case BOOTREPLY: // presumably from server to client
if ( ${msg.type} == DHCPOFFER ||
${msg.type} == DHCPACK || ${msg.type} == DHCPNAK )
parse_reply(${msg.options}, ${msg.type});
else
interp->Weird("DHCP_wrong_msg_type");
break;
case BOOTREPLY: // presumably from server to client
if ( ${msg.type} == DHCPOFFER ||
${msg.type} == DHCPACK ||
${msg.type} == DHCPNAK )
parse_reply(${msg.options}, ${msg.type});
else
connection()->bro_analyzer()->ProtocolViolation(fmt("unknown DHCP message type option for BOOTREPLY (%d)",
${msg.type}));
default:
interp->Weird("DHCP_wrong_op_type");
break;
}
break;
default:
connection()->bro_analyzer()->ProtocolViolation(fmt("unknown DHCP message op code (%d). Known codes: 1=BOOTREQUEST, 2=BOOTREPLY",
${msg.op}));
break;
}
connection()->bro_analyzer()->ProtocolConfirmation();
return true;
%}
};

View file

@ -10,13 +10,14 @@ enum OP_type {
# The option types are by no means complete.
# Anyone can add a new option type in RFC 1533 to be parsed here.
enum OPTION_type {
SUBNET_OPTION = 1,
ROUTER_OPTION = 3,
REQ_IP_OPTION = 50,
LEASE_OPTION = 51,
MSG_TYPE_OPTION = 53,
SERV_ID_OPTION = 54, # Server address, actually :)
END_OPTION = 255,
SUBNET_OPTION = 1,
ROUTER_OPTION = 3,
HOST_NAME_OPTION = 12,
REQ_IP_OPTION = 50,
LEASE_OPTION = 51,
MSG_TYPE_OPTION = 53,
SERV_ID_OPTION = 54, # Server address, actually :)
END_OPTION = 255,
};
# Refer to RFC 1533 for message types (with option = 53).
@ -34,21 +35,22 @@ enum DHCP_message_type {
type Option_Info(code: uint8) = record {
length : uint8;
value : case code of {
SUBNET_OPTION -> mask : uint32;
ROUTER_OPTION -> router_list: uint32[length/4];
REQ_IP_OPTION -> req_addr : uint32;
LEASE_OPTION -> lease : uint32;
MSG_TYPE_OPTION -> msg_type : uint8;
SERV_ID_OPTION -> serv_addr: uint32;
default -> other: bytestring &length = length;
SUBNET_OPTION -> mask : uint32;
ROUTER_OPTION -> router_list : uint32[length/4];
REQ_IP_OPTION -> req_addr : uint32;
LEASE_OPTION -> lease : uint32;
MSG_TYPE_OPTION -> msg_type : uint8;
SERV_ID_OPTION -> serv_addr : uint32;
HOST_NAME_OPTION-> host_name : bytestring &length = length;
default -> other : bytestring &length = length;
};
};
type DHCP_Option = record {
code : uint8;
data : case code of {
0, 255 -> none : empty;
default -> info : Option_Info(code);
0, 255 -> none : empty;
default -> info : Option_Info(code);
};
} &let {
last: bool = (code == 255); # Mark the end of a list of options

View file

@ -1,3 +1,4 @@
%include binpac.pac
%include bro.pac
%extern{

View file

@ -1,8 +1,5 @@
## Generated for DHCP messages of type *discover*.
##
## See `Wikipedia
## <http://en.wikipedia.org/wiki/Dynamic_Host_Configuration_Protocol>`__ for
## more information about the DHCP protocol.
## Generated for DHCP messages of type *DHCPDISCOVER* (client broadcast to locate
## available servers).
##
## c: The connection record describing the underlying UDP flow.
##
@ -10,33 +7,23 @@
##
## req_addr: The specific address requested by the client.
##
## .. bro:see:: dns_AAAA_reply dns_A_reply dns_CNAME_reply dns_EDNS_addl
## dns_HINFO_reply dns_MX_reply dns_NS_reply dns_PTR_reply dns_SOA_reply
## dns_SRV_reply dns_TSIG_addl dns_TXT_reply dns_WKS_reply dns_end
## dns_full_request dns_mapping_altered dns_mapping_lost_name dns_mapping_new_name
## dns_mapping_unverified dns_mapping_valid dns_message dns_query_reply
## dns_rejected dns_request non_dns_request dns_max_queries dns_session_timeout
## dns_skip_addl dns_skip_all_addl dns_skip_all_auth dns_skip_auth
## host_name: The value of the host name option, if specified by the client.
##
## .. bro:see:: dhcp_discover dhcp_offer dhcp_request dhcp_decline dhcp_ack dhcp_nak
## dhcp_release dhcp_inform
##
## .. note:: Bro does not support broadcast packets (as used by the DHCP
## protocol). It treats broadcast addresses just like any other and
## associates packets into transport-level flows in the same way as usual.
##
## .. todo:: Bro's current default configuration does not activate the protocol
## analyzer that generates this event; the corresponding script has not yet
## been ported to Bro 2.x. To still enable this event, one needs to
## register a port for it or add a DPD payload signature.
event dhcp_discover%(c: connection, msg: dhcp_msg, req_addr: addr%);
event dhcp_discover%(c: connection, msg: dhcp_msg, req_addr: addr, host_name: string%);
## Generated for DHCP messages of type *offer*.
##
## See `Wikipedia
## <http://en.wikipedia.org/wiki/Dynamic_Host_Configuration_Protocol>`__ for
## more information about the DHCP protocol.
## Generated for DHCP messages of type *DHCPOFFER* (server to client in response to
## DHCPDISCOVER with offer of configuration parameters).
##
## c: The connection record describing the underlying UDP flow.
##
## msg: TODO.
## msg: The parsed type-independent part of the DHCP message.
##
## mask: The subnet mask specified by the message.
##
@ -46,28 +33,21 @@ event dhcp_discover%(c: connection, msg: dhcp_msg, req_addr: addr%);
##
## serv_addr: The server address specified by the message.
##
## .. bro:see:: dns_AAAA_reply dns_A_reply dns_CNAME_reply dns_EDNS_addl
## dns_HINFO_reply dns_MX_reply dns_NS_reply dns_PTR_reply dns_SOA_reply
## dns_SRV_reply dns_TSIG_addl dns_TXT_reply dns_WKS_reply dns_end
## dns_full_request dns_mapping_altered dns_mapping_lost_name dns_mapping_new_name
## dns_mapping_unverified dns_mapping_valid dns_message dns_query_reply
## dns_rejected dns_request non_dns_request
## host_name: The value of the host name option, if specified by the client.
##
## .. bro:see:: dhcp_discover dhcp_request dhcp_decline dhcp_ack dhcp_nak
## dhcp_release dhcp_inform
##
## .. note:: Bro does not support broadcast packets (as used by the DHCP
## protocol). It treats broadcast addresses just like any other and
## associates packets into transport-level flows in the same way as usual.
##
## .. todo:: Bro's current default configuration does not activate the protocol
## analyzer that generates this event; the corresponding script has not yet
## been ported to Bro 2.x. To still enable this event, one needs to
## register a port for it or add a DPD payload signature.
event dhcp_offer%(c: connection, msg: dhcp_msg, mask: addr, router: dhcp_router_list, lease: interval, serv_addr: addr%);
event dhcp_offer%(c: connection, msg: dhcp_msg, mask: addr, router: dhcp_router_list, lease: interval, serv_addr: addr, host_name: string%);
## Generated for DHCP messages of type *request*.
##
## See `Wikipedia
## <http://en.wikipedia.org/wiki/Dynamic_Host_Configuration_Protocol>`__ for
## more information about the DHCP protocol.
## Generated for DHCP messages of type *DHCPREQUEST* (Client message to servers either
## (a) requesting offered parameters from one server and implicitly declining offers
## from all others, (b) confirming correctness of previously allocated address after,
## e.g., system reboot, or (c) extending the lease on a particular network address.)
##
## c: The connection record describing the underlying UDP flow.
##
@ -77,55 +57,37 @@ event dhcp_offer%(c: connection, msg: dhcp_msg, mask: addr, router: dhcp_router_
##
## serv_addr: The server address specified by the message.
##
## .. bro:see:: dns_AAAA_reply dns_A_reply dns_CNAME_reply dns_EDNS_addl
## dns_HINFO_reply dns_MX_reply dns_NS_reply dns_PTR_reply dns_SOA_reply
## dns_SRV_reply dns_TSIG_addl dns_TXT_reply dns_WKS_reply dns_end
## dns_full_request dns_mapping_altered dns_mapping_lost_name dns_mapping_new_name
## dns_mapping_unverified dns_mapping_valid dns_message dns_query_reply
## dns_rejected dns_request non_dns_request
## host_name: The value of the host name option, if specified by the client.
##
## .. bro:see:: dhcp_discover dhcp_offer dhcp_decline dhcp_ack dhcp_nak
## dhcp_release dhcp_inform
##
## .. note:: Bro does not support broadcast packets (as used by the DHCP
## protocol). It treats broadcast addresses just like any other and
## associates packets into transport-level flows in the same way as usual.
##
## .. todo:: Bro's current default configuration does not activate the protocol
## analyzer that generates this event; the corresponding script has not yet
## been ported to Bro 2.x. To still enable this event, one needs to
## register a port for it or add a DPD payload signature.
event dhcp_request%(c: connection, msg: dhcp_msg, req_addr: addr, serv_addr: addr%);
event dhcp_request%(c: connection, msg: dhcp_msg, req_addr: addr, serv_addr: addr, host_name: string%);
## Generated for DHCP messages of type *decline*.
##
## See `Wikipedia
## <http://en.wikipedia.org/wiki/Dynamic_Host_Configuration_Protocol>`__ for
## more information about the DHCP protocol.
## Generated for DHCP messages of type *DHCPDECLINE* (Client to server indicating
## network address is already in use).
##
## c: The connection record describing the underlying UDP flow.
##
## msg: The parsed type-independent part of the DHCP message.
##
## .. bro:see:: dns_AAAA_reply dns_A_reply dns_CNAME_reply dns_EDNS_addl
## dns_HINFO_reply dns_MX_reply dns_NS_reply dns_PTR_reply dns_SOA_reply
## dns_SRV_reply dns_TSIG_addl dns_TXT_reply dns_WKS_reply dns_end
## dns_full_request dns_mapping_altered dns_mapping_lost_name dns_mapping_new_name
## dns_mapping_unverified dns_mapping_valid dns_message dns_query_reply
## dns_rejected dns_request non_dns_request
## host_name: The value of the host name option, if specified by the client.
##
## .. bro:see:: dhcp_discover dhcp_offer dhcp_request dhcp_ack dhcp_nak
## dhcp_release dhcp_inform
##
## .. note:: Bro does not support broadcast packets (as used by the DHCP
## protocol). It treats broadcast addresses just like any other and
## associates packets into transport-level flows in the same way as usual.
##
## .. todo:: Bro's current default configuration does not activate the protocol
## analyzer that generates this event; the corresponding script has not yet
## been ported to Bro 2.x. To still enable this event, one needs to
## register a port for it or add a DPD payload signature.
event dhcp_decline%(c: connection, msg: dhcp_msg%);
event dhcp_decline%(c: connection, msg: dhcp_msg, host_name: string%);
## Generated for DHCP messages of type *acknowledgment*.
##
## See `Wikipedia
## <http://en.wikipedia.org/wiki/Dynamic_Host_Configuration_Protocol>`__ for
## more information about the DHCP protocol.
## Generated for DHCP messages of type *DHCPACK* (Server to client with configuration
## parameters, including committed network address).
##
## c: The connection record describing the underlying UDP flow.
##
@ -139,101 +101,62 @@ event dhcp_decline%(c: connection, msg: dhcp_msg%);
##
## serv_addr: The server address specified by the message.
##
## .. bro:see:: dns_AAAA_reply dns_A_reply dns_CNAME_reply dns_EDNS_addl
## dns_HINFO_reply dns_MX_reply dns_NS_reply dns_PTR_reply dns_SOA_reply
## dns_SRV_reply dns_TSIG_addl dns_TXT_reply dns_WKS_reply dns_end
## dns_full_request dns_mapping_altered dns_mapping_lost_name dns_mapping_new_name
## dns_mapping_unverified dns_mapping_valid dns_message dns_query_reply
## dns_rejected dns_request non_dns_request
## host_name: The value of the host name option, if specified by the client.
##
## .. note:: Bro does not support broadcast packets (as used by the DHCP
## protocol). It treats broadcast addresses just like any other and
## associates packets into transport-level flows in the same way as usual.
## .. bro:see:: dhcp_discover dhcp_offer dhcp_request dhcp_decline dhcp_nak
## dhcp_release dhcp_inform
##
## .. todo:: Bro's current default configuration does not activate the protocol
## analyzer that generates this event; the corresponding script has not yet
## been ported to Bro 2.x. To still enable this event, one needs to
## register a port for it or add a DPD payload signature.
event dhcp_ack%(c: connection, msg: dhcp_msg, mask: addr, router: dhcp_router_list, lease: interval, serv_addr: addr%);
event dhcp_ack%(c: connection, msg: dhcp_msg, mask: addr, router: dhcp_router_list, lease: interval, serv_addr: addr, host_name: string%);
## Generated for DHCP messages of type *negative acknowledgment*.
##
## See `Wikipedia
## <http://en.wikipedia.org/wiki/Dynamic_Host_Configuration_Protocol>`__ for
## more information about the DHCP protocol.
## Generated for DHCP messages of type *DHCPNAK* (Server to client indicating client's
## notion of network address is incorrect (e.g., client has moved to new subnet) or
## client's lease has expired).
##
## c: The connection record describing the underlying UDP flow.
##
## msg: The parsed type-independent part of the DHCP message.
##
## .. bro:see:: dns_AAAA_reply dns_A_reply dns_CNAME_reply dns_EDNS_addl
## dns_HINFO_reply dns_MX_reply dns_NS_reply dns_PTR_reply dns_SOA_reply
## dns_SRV_reply dns_TSIG_addl dns_TXT_reply dns_WKS_reply dns_end
## dns_full_request dns_mapping_altered dns_mapping_lost_name dns_mapping_new_name
## dns_mapping_unverified dns_mapping_valid dns_message dns_query_reply
## dns_rejected dns_request non_dns_request
## host_name: The value of the host name option, if specified by the client.
##
## .. bro:see:: dhcp_discover dhcp_offer dhcp_request dhcp_decline dhcp_ack dhcp_release
## dhcp_inform
##
## .. note:: Bro does not support broadcast packets (as used by the DHCP
## protocol). It treats broadcast addresses just like any other and
## associates packets into transport-level flows in the same way as usual.
##
## .. todo:: Bro's current default configuration does not activate the protocol
## analyzer that generates this event; the corresponding script has not yet
## been ported to Bro 2.x. To still enable this event, one needs to
## register a port for it or add a DPD payload signature.
event dhcp_nak%(c: connection, msg: dhcp_msg%);
event dhcp_nak%(c: connection, msg: dhcp_msg, host_name: string%);
## Generated for DHCP messages of type *release*.
##
## See `Wikipedia
## <http://en.wikipedia.org/wiki/Dynamic_Host_Configuration_Protocol>`__ for
## more information about the DHCP protocol.
## Generated for DHCP messages of type *DHCPRELEASE* (Client to server relinquishing
## network address and cancelling remaining lease).
##
## c: The connection record describing the underlying UDP flow.
##
## msg: The parsed type-independent part of the DHCP message.
##
## .. bro:see:: dns_AAAA_reply dns_A_reply dns_CNAME_reply dns_EDNS_addl
## dns_HINFO_reply dns_MX_reply dns_NS_reply dns_PTR_reply dns_SOA_reply
## dns_SRV_reply dns_TSIG_addl dns_TXT_reply dns_WKS_reply dns_end
## dns_full_request dns_mapping_altered dns_mapping_lost_name dns_mapping_new_name
## dns_mapping_unverified dns_mapping_valid dns_message dns_query_reply
## dns_rejected dns_request non_dns_request
## host_name: The value of the host name option, if specified by the client.
##
## .. note:: Bro does not support broadcast packets (as used by the DHCP
## protocol). It treats broadcast addresses just like any other and
## associates packets into transport-level flows in the same way as usual.
## .. bro:see:: dhcp_discover dhcp_offer dhcp_request dhcp_decline dhcp_ack dhcp_nak
## dhcp_inform
##
## .. todo:: Bro's current default configuration does not activate the protocol
## analyzer that generates this event; the corresponding script has not yet
## been ported to Bro 2.x. To still enable this event, one needs to
## register a port for it or add a DPD payload signature.
event dhcp_release%(c: connection, msg: dhcp_msg%);
event dhcp_release%(c: connection, msg: dhcp_msg, host_name: string%);
## Generated for DHCP messages of type *inform*.
##
## See `Wikipedia
## <http://en.wikipedia.org/wiki/Dynamic_Host_Configuration_Protocol>`__ for
## more information about the DHCP protocol.
## Generated for DHCP messages of type *DHCPINFORM* (Client to server, asking only for
## local configuration parameters; client already has externally configured network
## address).
##
## c: The connection record describing the underlying UDP flow.
##
## msg: The parsed type-independent part of the DHCP message.
##
## .. bro:see:: dns_AAAA_reply dns_A_reply dns_CNAME_reply dns_EDNS_addl
## dns_HINFO_reply dns_MX_reply dns_NS_reply dns_PTR_reply dns_SOA_reply
## dns_SRV_reply dns_TSIG_addl dns_TXT_reply dns_WKS_reply dns_end
## dns_full_request dns_mapping_altered dns_mapping_lost_name dns_mapping_new_name
## dns_mapping_unverified dns_mapping_valid dns_message dns_query_reply
## dns_rejected dns_request non_dns_request
## host_name: The value of the host name option, if specified by the client.
##
## .. bro:see:: dhcp_discover dhcp_offer dhcp_request dhcp_decline dhcp_ack dhcp_nak
## dhcp_release
##
## .. note:: Bro does not support broadcast packets (as used by the DHCP
## protocol). It treats broadcast addresses just like any other and
## associates packets into transport-level flows in the same way as usual.
##
## .. todo:: Bro's current default configuration does not activate the protocol
## analyzer that generates this event; the corresponding script has not yet
## been ported to Bro 2.x. To still enable this event, one needs to
## register a port for it or add a DPD payload signature.
event dhcp_inform%(c: connection, msg: dhcp_msg%);
event dhcp_inform%(c: connection, msg: dhcp_msg, host_name: string%);

View file

@ -95,29 +95,32 @@ bool Raw::Execute()
else if ( childpid == 0 )
{
// we are the child.
close(pipes[stdout_in]);
dup2(pipes[stdout_out], stdout_fileno);
safe_close(pipes[stdout_in]);
if ( dup2(pipes[stdout_out], stdout_fileno) == -1 )
Error(Fmt("Error on dup2 stdout_out: %d", errno));
if ( stdin_towrite )
{
close(pipes[stdin_out]);
dup2(pipes[stdin_in], stdin_fileno);
safe_close(pipes[stdin_out]);
if ( dup2(pipes[stdin_in], stdin_fileno) == -1 )
Error(Fmt("Error on dup2 stdin_in: %d", errno));
}
if ( use_stderr )
{
close(pipes[stderr_in]);
dup2(pipes[stderr_out], stderr_fileno);
safe_close(pipes[stderr_in]);
if ( dup2(pipes[stderr_out], stderr_fileno) == -1 )
Error(Fmt("Error on dup2 stderr_out: %d", errno));
}
execl("/bin/sh", "sh", "-c", fname.c_str(), NULL);
execl("/bin/sh", "sh", "-c", fname.c_str(), (char*) NULL);
fprintf(stderr, "Exec failed :(......\n");
exit(255);
}
else
{
// we are the parent
close(pipes[stdout_out]);
safe_close(pipes[stdout_out]);
pipes[stdout_out] = -1;
if ( Info().mode == MODE_STREAM )
@ -125,7 +128,7 @@ bool Raw::Execute()
if ( stdin_towrite )
{
close(pipes[stdin_in]);
safe_close(pipes[stdin_in]);
pipes[stdin_in] = -1;
fcntl(pipes[stdin_out], F_SETFL, O_NONBLOCK); // ya, just always set this to nonblocking. we do not want to block on a program receiving data.
// note that there is a small gotcha with it. More data is queued when more data is read from the program output. Hence, when having
@ -134,7 +137,7 @@ bool Raw::Execute()
if ( use_stderr )
{
close(pipes[stderr_out]);
safe_close(pipes[stderr_out]);
pipes[stderr_out] = -1;
fcntl(pipes[stderr_in], F_SETFL, O_NONBLOCK); // true for this too.
}
@ -195,7 +198,10 @@ bool Raw::CloseInput()
{
for ( int i = 0; i < 6; i ++ )
if ( pipes[i] != -1 )
close(pipes[i]);
{
safe_close(pipes[i]);
pipes[i] = -1;
}
}
file = 0;
@ -393,11 +399,13 @@ void Raw::WriteToStdin()
{
Error(Fmt("Writing to child process stdin failed: %d. Stopping writing at position %d", errno, pos));
stdin_towrite = 0;
close(pipes[stdin_out]);
}
if ( stdin_towrite == 0 ) // send EOF when we are done.
close(pipes[stdin_out]);
{
safe_close(pipes[stdin_out]);
pipes[stdin_out] = -1;
}
if ( Info().mode == MODE_MANUAL && stdin_towrite != 0 )
{
@ -528,6 +536,7 @@ bool Raw::DoUpdate()
if ( childpid != -1 && waitpid(childpid, &return_code, WNOHANG) != 0 )
{
// child died
childpid = -1;
bool signal = false;
int code = 0;
if ( WIFEXITED(return_code) )
@ -539,7 +548,7 @@ bool Raw::DoUpdate()
else if ( WIFSIGNALED(return_code) )
{
signal = false;
signal = true;
code = WTERMSIG(return_code);
Error(Fmt("Child process exited due to signal %d", code));
}
@ -564,7 +573,7 @@ bool Raw::DoUpdate()
EndCurrentSend();
SendEvent("InputRaw::process_finished", 4, vals);
}
}

View file

@ -148,6 +148,26 @@ const char* fmt_conn_id(const uint32* src_addr, uint32 src_port,
return fmt_conn_id(src, src_port, dst, dst_port);
}
// Render an EUI-48 or EUI-64 MAC address as colon-separated lowercase hex.
// Requires at least 8 valid bytes at *m*; otherwise an empty string is
// returned. The result is allocated with new[] and ownership passes to the
// caller, who must release it with delete[].
char* fmt_mac(const unsigned char* m, int len)
	{
	char* out = new char[25]; // 25: room for an EUI-64 string (23 chars) + NUL

	if ( len < 8 )
		{
		out[0] = '\0';
		return out;
		}

	// An address whose last two bytes are zero is treated as EUI-48;
	// anything else is formatted as a full EUI-64.
	bool is_eui48 = m[6] == 0 && m[7] == 0;

	if ( is_eui48 )
		snprintf(out, 19, "%02x:%02x:%02x:%02x:%02x:%02x",
			 m[0], m[1], m[2], m[3], m[4], m[5]);
	else
		snprintf(out, 25, "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
			 m[0], m[1], m[2], m[3], m[4], m[5], m[6], m[7]);

	return out;
	}
uint32 extract_uint32(const u_char* data)
{
uint32 val;

View file

@ -156,6 +156,18 @@ extern const char* fmt_conn_id(const IPAddr& src_addr, uint32 src_port,
extern const char* fmt_conn_id(const uint32* src_addr, uint32 src_port,
const uint32* dst_addr, uint32 dst_port);
/**
* Given a MAC address, formats it in hex as 00:de:ad:be:ef:00.
* Supports both EUI-48 and EUI-64. If fewer than 8 bytes are valid,
* returns an empty string.
*
* @param m EUI-48 or EUI-64 MAC address to format, as a char array
* @param len Number of bytes valid starting at *m*. This must be at
* least 8 for a valid address.
* @return A string of the formatted MAC. Passes ownership to caller.
*/
extern char* fmt_mac(const unsigned char* m, int len);
// Read 4 bytes from data and return in network order.
extern uint32 extract_uint32(const u_char* data);

View file

@ -26,6 +26,8 @@
1 6667
1 6668
1 6669
1 67
1 68
1 80
1 8000
1 8080
@ -36,8 +38,8 @@
1 992
1 993
1 995
40 and
39 or
40 port
42 and
41 or
42 port
31 tcp
9 udp
11 udp

View file

@ -3,7 +3,7 @@
#empty_field (empty)
#unset_field -
#path conn
#open 2008-05-16-15-50-57
#open 2013-08-04-03-28-45
#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes tunnel_parents
#types time string addr port addr port enum string interval count count string bool count string count count count count table[string]
1210953047.736921 arKYeMETxOg 192.168.2.16 1576 75.126.130.163 80 tcp - 0.000357 0 0 SHR - 0 fA 1 40 1 40 (empty)
@ -21,10 +21,10 @@
1210953074.570439 c4Zw9TmAE05 192.168.2.16 1580 67.228.110.120 80 tcp http 0.466677 469 3916 SF - 0 ShADadFf 7 757 6 4164 (empty)
1210953052.202579 nQcgTWjvg4c 192.168.2.16 3797 65.55.158.80 3544 udp teredo 8.928880 129 48 SF - 0 Dd 2 185 1 76 (empty)
1210953060.829233 GSxOnSLghOa 192.168.2.16 3797 83.170.1.38 32900 udp teredo 13.293994 2359 11243 SF - 0 Dd 12 2695 13 11607 (empty)
1210953058.933954 iE6yhOq3SF 0.0.0.0 68 255.255.255.255 67 udp - - - - S0 - 0 D 1 328 0 0 (empty)
1210953058.933954 iE6yhOq3SF 0.0.0.0 68 255.255.255.255 67 udp dhcp - - - S0 - 0 D 1 328 0 0 (empty)
1210953052.324629 TEfuqmmG4bh 192.168.2.16 3797 65.55.158.81 3544 udp - - - - SHR - 0 d 0 0 1 137 (empty)
1210953046.591933 UWkUyAuUGXf 192.168.2.16 138 192.168.2.255 138 udp - 28.448321 416 0 S0 - 0 D 2 472 0 0 (empty)
1210953052.324629 FrJExwHcSal fe80::8000:f227:bec8:61af 134 fe80::8000:ffff:ffff:fffd 133 icmp - - - - OTH - 0 - 1 88 0 0 TEfuqmmG4bh
1210953060.829303 qCaWGmzFtM5 2001:0:4137:9e50:8000:f12a:b9c8:2815 128 2001:4860:0:2001::68 129 icmp - 0.463615 4 4 OTH - 0 - 1 52 1 52 GSxOnSLghOa,nQcgTWjvg4c
1210953052.202579 j4u32Pc5bif fe80::8000:ffff:ffff:fffd 133 ff02::2 134 icmp - - - - OTH - 0 - 1 64 0 0 nQcgTWjvg4c
#close 2008-05-16-15-51-16
#close 2013-08-04-03-28-45

View file

@ -3,7 +3,7 @@
#empty_field (empty)
#unset_field -
#path loaded_scripts
#open 2013-07-29-22-37-52
#open 2013-08-09-16-13-58
#fields name
#types string
scripts/base/init-bare.bro
@ -23,28 +23,28 @@ scripts/base/init-bare.bro
build/scripts/base/bif/plugins/Bro_DCE_RPC.events.bif.bro
build/scripts/base/bif/plugins/Bro_DHCP.events.bif.bro
build/scripts/base/bif/plugins/Bro_DNS.events.bif.bro
build/scripts/base/bif/plugins/Bro_FTP.events.bif.bro
build/scripts/base/bif/plugins/Bro_FTP.functions.bif.bro
build/scripts/base/bif/plugins/Bro_File.events.bif.bro
build/scripts/base/bif/plugins/Bro_FileHash.events.bif.bro
build/scripts/base/bif/plugins/Bro_Finger.events.bif.bro
build/scripts/base/bif/plugins/Bro_GTPv1.events.bif.bro
build/scripts/base/bif/plugins/Bro_FTP.events.bif.bro
build/scripts/base/bif/plugins/Bro_FTP.functions.bif.bro
build/scripts/base/bif/plugins/Bro_Gnutella.events.bif.bro
build/scripts/base/bif/plugins/Bro_GTPv1.events.bif.bro
build/scripts/base/bif/plugins/Bro_HTTP.events.bif.bro
build/scripts/base/bif/plugins/Bro_HTTP.functions.bif.bro
build/scripts/base/bif/plugins/Bro_ICMP.events.bif.bro
build/scripts/base/bif/plugins/Bro_IRC.events.bif.bro
build/scripts/base/bif/plugins/Bro_Ident.events.bif.bro
build/scripts/base/bif/plugins/Bro_InterConn.events.bif.bro
build/scripts/base/bif/plugins/Bro_IRC.events.bif.bro
build/scripts/base/bif/plugins/Bro_Login.events.bif.bro
build/scripts/base/bif/plugins/Bro_Login.functions.bif.bro
build/scripts/base/bif/plugins/Bro_MIME.events.bif.bro
build/scripts/base/bif/plugins/Bro_Modbus.events.bif.bro
build/scripts/base/bif/plugins/Bro_NCP.events.bif.bro
build/scripts/base/bif/plugins/Bro_NTP.events.bif.bro
build/scripts/base/bif/plugins/Bro_NetBIOS.events.bif.bro
build/scripts/base/bif/plugins/Bro_NetBIOS.functions.bif.bro
build/scripts/base/bif/plugins/Bro_NetFlow.events.bif.bro
build/scripts/base/bif/plugins/Bro_NTP.events.bif.bro
build/scripts/base/bif/plugins/Bro_PIA.events.bif.bro
build/scripts/base/bif/plugins/Bro_POP3.events.bif.bro
build/scripts/base/bif/plugins/Bro_RPC.events.bif.bro
@ -91,6 +91,7 @@ scripts/base/init-bare.bro
scripts/base/utils/site.bro
scripts/base/utils/patterns.bro
build/scripts/base/bif/__load__.bro
build/scripts/base/bif/top-k.bif.bro
scripts/policy/misc/loaded-scripts.bro
scripts/base/utils/paths.bro
#close 2013-07-29-22-37-52
#close 2013-08-09-16-13-58

View file

@ -3,7 +3,7 @@
#empty_field (empty)
#unset_field -
#path loaded_scripts
#open 2013-07-29-22-37-53
#open 2013-08-09-16-13-37
#fields name
#types string
scripts/base/init-bare.bro
@ -23,28 +23,28 @@ scripts/base/init-bare.bro
build/scripts/base/bif/plugins/Bro_DCE_RPC.events.bif.bro
build/scripts/base/bif/plugins/Bro_DHCP.events.bif.bro
build/scripts/base/bif/plugins/Bro_DNS.events.bif.bro
build/scripts/base/bif/plugins/Bro_FTP.events.bif.bro
build/scripts/base/bif/plugins/Bro_FTP.functions.bif.bro
build/scripts/base/bif/plugins/Bro_File.events.bif.bro
build/scripts/base/bif/plugins/Bro_FileHash.events.bif.bro
build/scripts/base/bif/plugins/Bro_Finger.events.bif.bro
build/scripts/base/bif/plugins/Bro_GTPv1.events.bif.bro
build/scripts/base/bif/plugins/Bro_FTP.events.bif.bro
build/scripts/base/bif/plugins/Bro_FTP.functions.bif.bro
build/scripts/base/bif/plugins/Bro_Gnutella.events.bif.bro
build/scripts/base/bif/plugins/Bro_GTPv1.events.bif.bro
build/scripts/base/bif/plugins/Bro_HTTP.events.bif.bro
build/scripts/base/bif/plugins/Bro_HTTP.functions.bif.bro
build/scripts/base/bif/plugins/Bro_ICMP.events.bif.bro
build/scripts/base/bif/plugins/Bro_IRC.events.bif.bro
build/scripts/base/bif/plugins/Bro_Ident.events.bif.bro
build/scripts/base/bif/plugins/Bro_InterConn.events.bif.bro
build/scripts/base/bif/plugins/Bro_IRC.events.bif.bro
build/scripts/base/bif/plugins/Bro_Login.events.bif.bro
build/scripts/base/bif/plugins/Bro_Login.functions.bif.bro
build/scripts/base/bif/plugins/Bro_MIME.events.bif.bro
build/scripts/base/bif/plugins/Bro_Modbus.events.bif.bro
build/scripts/base/bif/plugins/Bro_NCP.events.bif.bro
build/scripts/base/bif/plugins/Bro_NTP.events.bif.bro
build/scripts/base/bif/plugins/Bro_NetBIOS.events.bif.bro
build/scripts/base/bif/plugins/Bro_NetBIOS.functions.bif.bro
build/scripts/base/bif/plugins/Bro_NetFlow.events.bif.bro
build/scripts/base/bif/plugins/Bro_NTP.events.bif.bro
build/scripts/base/bif/plugins/Bro_PIA.events.bif.bro
build/scripts/base/bif/plugins/Bro_POP3.events.bif.bro
build/scripts/base/bif/plugins/Bro_RPC.events.bif.bro
@ -91,6 +91,7 @@ scripts/base/init-bare.bro
scripts/base/utils/site.bro
scripts/base/utils/patterns.bro
build/scripts/base/bif/__load__.bro
build/scripts/base/bif/top-k.bif.bro
scripts/base/init-default.bro
scripts/base/utils/active-http.bro
scripts/base/utils/exec.bro
@ -147,6 +148,7 @@ scripts/base/init-default.bro
scripts/base/frameworks/sumstats/plugins/std-dev.bro
scripts/base/frameworks/sumstats/plugins/variance.bro
scripts/base/frameworks/sumstats/plugins/sum.bro
scripts/base/frameworks/sumstats/plugins/topk.bro
scripts/base/frameworks/sumstats/plugins/unique.bro
scripts/base/frameworks/sumstats/non-cluster.bro
scripts/base/frameworks/tunnels/__load__.bro
@ -156,6 +158,10 @@ scripts/base/init-default.bro
scripts/base/protocols/conn/contents.bro
scripts/base/protocols/conn/inactivity.bro
scripts/base/protocols/conn/polling.bro
scripts/base/protocols/dhcp/__load__.bro
scripts/base/protocols/dhcp/consts.bro
scripts/base/protocols/dhcp/main.bro
scripts/base/protocols/dhcp/utils.bro
scripts/base/protocols/dns/__load__.bro
scripts/base/protocols/dns/consts.bro
scripts/base/protocols/dns/main.bro
@ -202,4 +208,4 @@ scripts/base/init-default.bro
scripts/base/files/extract/main.bro
scripts/base/misc/find-checksum-offloading.bro
scripts/policy/misc/loaded-scripts.bro
#close 2013-07-29-22-37-53
#close 2013-08-09-16-13-37

View file

@ -1,3 +1,3 @@
A test metric threshold was crossed with a value of: 101.0
End of epoch handler was called
101.0
End of epoch handler was called

View file

@ -0,0 +1,2 @@
SumStat key request
Host: 7.2.1.5 -> 145

View file

@ -0,0 +1,2 @@
Key request for 1.2.3.4
Host: 1.2.3.4 -> 42

View file

@ -0,0 +1,8 @@
Host: 6.5.4.3 Sampled observations: 2
[2, 5]
Host: 10.10.10.10 Sampled observations: 1
[5]
Host: 1.2.3.4 Sampled observations: 34
[5, 22, 52, 91, 94]
Host: 7.2.1.5 Sampled observations: 2
[1, 91]

View file

@ -1,18 +0,0 @@
1
1.2.3.4
10.10.10.10
2
2
34
6.5.4.3
7.2.1.5
[num=1, dbl=<uninitialized>, str=<uninitialized>]
[num=2, dbl=<uninitialized>, str=<uninitialized>]
[num=22, dbl=<uninitialized>, str=<uninitialized>]
[num=5, dbl=<uninitialized>, str=<uninitialized>]
[num=5, dbl=<uninitialized>, str=<uninitialized>]
[num=5, dbl=<uninitialized>, str=<uninitialized>]
[num=52, dbl=<uninitialized>, str=<uninitialized>]
[num=91, dbl=<uninitialized>, str=<uninitialized>]
[num=91, dbl=<uninitialized>, str=<uninitialized>]
[num=94, dbl=<uninitialized>, str=<uninitialized>]

View file

@ -0,0 +1,10 @@
#separator \x09
#set_separator ,
#empty_field (empty)
#unset_field -
#path dhcp
#open 2013-07-31-21-00-49
#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p mac assigned_ip lease_time trans_id
#types time string addr port addr port string addr interval count
1370200444.371332 nQcgTWjvg4c 128.2.6.189 68 128.2.6.152 67 90:b1:1c:99:49:29 128.2.6.189 900.000000 1984
#close 2013-07-31-21-00-50

View file

@ -0,0 +1,10 @@
#separator \x09
#set_separator ,
#empty_field (empty)
#unset_field -
#path dhcp
#open 2013-08-03-01-18-52
#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p mac assigned_ip lease_time trans_id
#types time string addr port addr port string addr interval count
1374432420.191205 UWkUyAuUGXf 128.2.6.122 68 128.2.6.152 67 90:b1:1c:99:49:29 128.2.6.122 0.000000 2754407505
#close 2013-08-03-01-18-52

View file

@ -3,5 +3,4 @@ test1, [exit_code=0, signal_exit=F, stdout=[done, exit, stop], stderr=<uninitial
[out2] = [insert more text here, and there]
}]
test2, [exit_code=1, signal_exit=F, stdout=[here's something on stdout, some more stdout, last stdout], stderr=[and some stderr, more stderr, last stderr], files=<uninitialized>]
test3, [exit_code=9, signal_exit=F, stdout=[FML], stderr=<uninitialized>, files=<uninitialized>]
test4, [exit_code=0, signal_exit=F, stdout=[hibye], stderr=<uninitialized>, files=<uninitialized>]

View file

@ -0,0 +1,11 @@
#separator \x09
#set_separator ,
#empty_field (empty)
#unset_field -
#path known_devices
#open 2013-07-31-21-27-41
#fields ts mac dhcp_host_name
#types time string string
1370200443.344965 90:b1:1c:99:49:29 btest.is.cool
1374432420.186878 90:b1:1c:99:49:29 (empty)
#close 2013-07-31-21-27-41

Binary file not shown.

Binary file not shown.

View file

@ -26,16 +26,16 @@ global n = 0;
event bro_init() &priority=5
{
local r1: SumStats::Reducer = [$stream="test", $apply=set(SumStats::SUM, SumStats::MIN, SumStats::MAX, SumStats::AVERAGE, SumStats::STD_DEV, SumStats::VARIANCE, SumStats::UNIQUE)];
SumStats::create([$epoch=5secs,
SumStats::create([$name="test",
$epoch=5secs,
$reducers=set(r1),
$epoch_finished(rt: SumStats::ResultTable) =
$epoch_result(ts: time, key: SumStats::Key, result: SumStats::Result) =
{
local r = result["test"];
print fmt("Host: %s - num:%d - sum:%.1f - avg:%.1f - max:%.1f - min:%.1f - var:%.1f - std_dev:%.1f - unique:%d", key$host, r$num, r$sum, r$average, r$max, r$min, r$variance, r$std_dev, r$unique);
},
$epoch_finished(ts: time) =
{
for ( key in rt )
{
local r = rt[key]["test"];
print fmt("Host: %s - num:%d - sum:%.1f - avg:%.1f - max:%.1f - min:%.1f - var:%.1f - std_dev:%.1f - unique:%d", key$host, r$num, r$sum, r$average, r$max, r$min, r$variance, r$std_dev, r$unique);
}
terminate();
}]);
}

View file

@ -39,6 +39,5 @@ event bro_init()
try = 0;
outfile = open("../out");
Input::add_event([$source="cat > ../test.txt |", $reader=Input::READER_RAW, $mode=Input::STREAM, $name="input", $fields=Val, $ev=line, $want_record=F, $config=config_strings]);
Input::remove("input");
Input::add_event([$source="cat |", $reader=Input::READER_RAW, $mode=Input::STREAM, $name="input2", $fields=Val, $ev=line, $want_record=F, $config=config_strings]);
}

View file

@ -23,16 +23,16 @@ global n = 0;
event bro_init() &priority=5
{
local r1: SumStats::Reducer = [$stream="test", $apply=set(SumStats::SUM, SumStats::MIN, SumStats::MAX, SumStats::AVERAGE, SumStats::STD_DEV, SumStats::VARIANCE, SumStats::UNIQUE)];
SumStats::create([$epoch=5secs,
SumStats::create([$name="test",
$epoch=5secs,
$reducers=set(r1),
$epoch_finished(rt: SumStats::ResultTable) =
$epoch_result(ts: time, key: SumStats::Key, result: SumStats::Result) =
{
local r = result["test"];
print fmt("Host: %s - num:%d - sum:%.1f - avg:%.1f - max:%.1f - min:%.1f - var:%.1f - std_dev:%.1f - unique:%d", key$host, r$num, r$sum, r$average, r$max, r$min, r$variance, r$std_dev, r$unique);
},
$epoch_finished(ts: time) =
{
for ( key in rt )
{
local r = rt[key]["test"];
print fmt("Host: %s - num:%d - sum:%.1f - avg:%.1f - max:%.1f - min:%.1f - var:%.1f - std_dev:%.1f - unique:%d", key$host, r$num, r$sum, r$average, r$max, r$min, r$variance, r$std_dev, r$unique);
}
terminate();
}]);
}

View file

@ -11,16 +11,14 @@ event bro_init() &priority=5
SumStats::MIN,
SumStats::STD_DEV,
SumStats::UNIQUE)];
SumStats::create([$epoch=3secs,
$reducers=set(r1),
$epoch_finished(data: SumStats::ResultTable) =
{
for ( key in data )
{
local r = data[key]["test.metric"];
print fmt("Host: %s - num:%d - sum:%.1f - var:%.1f - avg:%.1f - max:%.1f - min:%.1f - std_dev:%.1f - unique:%d", key$host, r$num, r$sum, r$variance, r$average, r$max, r$min, r$std_dev, r$unique);
}
}
SumStats::create([$name="test",
$epoch=3secs,
$reducers=set(r1),
$epoch_result(ts: time, key: SumStats::Key, result: SumStats::Result) =
{
local r = result["test.metric"];
print fmt("Host: %s - num:%d - sum:%.1f - var:%.1f - avg:%.1f - max:%.1f - min:%.1f - std_dev:%.1f - unique:%d", key$host, r$num, r$sum, r$variance, r$average, r$max, r$min, r$std_dev, r$unique);
}
]);
SumStats::observe("test.metric", [$host=1.2.3.4], [$num=5]);

View file

@ -20,20 +20,23 @@ redef Log::default_rotation_interval = 0secs;
event bro_init() &priority=5
{
local r1: SumStats::Reducer = [$stream="test.metric", $apply=set(SumStats::SUM)];
SumStats::create([$epoch=10secs,
SumStats::create([$name="test",
$epoch=10secs,
$reducers=set(r1),
$epoch_finished(data: SumStats::ResultTable) =
{
print "End of epoch handler was called";
for ( res in data )
print data[res]["test.metric"]$sum;
terminate();
},
$epoch_result(ts: time, key: SumStats::Key, result: SumStats::Result) =
{
print result["test.metric"]$sum;
},
$epoch_finished(ts: time) =
{
print "End of epoch handler was called";
terminate();
},
$threshold_val(key: SumStats::Key, result: SumStats::Result) =
{
return double_to_count(result["test.metric"]$sum);
return result["test.metric"]$sum;
},
$threshold=100,
$threshold=100.0,
$threshold_crossed(key: SumStats::Key, result: SumStats::Result) =
{
print fmt("A test metric threshold was crossed with a value of: %.1f", result["test.metric"]$sum);

View file

@ -0,0 +1,96 @@
# @TEST-SERIALIZE: comm
#
# @TEST-EXEC: btest-bg-run manager-1 BROPATH=$BROPATH:.. CLUSTER_NODE=manager-1 bro %INPUT
# @TEST-EXEC: sleep 1
# @TEST-EXEC: btest-bg-run worker-1 BROPATH=$BROPATH:.. CLUSTER_NODE=worker-1 bro %INPUT
# @TEST-EXEC: btest-bg-run worker-2 BROPATH=$BROPATH:.. CLUSTER_NODE=worker-2 bro %INPUT
# @TEST-EXEC: btest-bg-wait 15
# @TEST-EXEC: btest-diff manager-1/.stdout
@TEST-START-FILE cluster-layout.bro
redef Cluster::nodes = {
["manager-1"] = [$node_type=Cluster::MANAGER, $ip=127.0.0.1, $p=37757/tcp, $workers=set("worker-1", "worker-2")],
["worker-1"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=37760/tcp, $manager="manager-1", $interface="eth0"],
["worker-2"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=37761/tcp, $manager="manager-1", $interface="eth1"],
};
@TEST-END-FILE
redef Log::default_rotation_interval = 0secs;
global n = 0;
event bro_init() &priority=5
{
local r1 = SumStats::Reducer($stream="test", $apply=set(SumStats::SUM, SumStats::MIN, SumStats::MAX, SumStats::AVERAGE, SumStats::STD_DEV, SumStats::VARIANCE, SumStats::UNIQUE));
SumStats::create([$name="test sumstat",
$epoch=1hr,
$reducers=set(r1)]);
}
event remote_connection_closed(p: event_peer)
{
terminate();
}
global ready_for_data: event();
redef Cluster::manager2worker_events += /^ready_for_data$/;
# Raised by the manager once both workers have connected; each worker
# then feeds its share of observations into the "test" stream so the
# clustered sumstat has data to aggregate.
event ready_for_data()
	{
	# Worker 1's observations (note 1.2.3.4 gets data from both workers,
	# so its aggregate spans the cluster).
	if ( Cluster::node == "worker-1" )
		{
		SumStats::observe("test", [$host=1.2.3.4], [$num=34]);
		SumStats::observe("test", [$host=1.2.3.4], [$num=30]);
		SumStats::observe("test", [$host=6.5.4.3], [$num=1]);
		SumStats::observe("test", [$host=7.2.1.5], [$num=54]);
		}
	# Worker 2's observations; 7.2.1.5's values (54 + 91) are what the
	# on-demand key request below is expected to sum.
	if ( Cluster::node == "worker-2" )
		{
		SumStats::observe("test", [$host=1.2.3.4], [$num=75]);
		SumStats::observe("test", [$host=1.2.3.4], [$num=30]);
		SumStats::observe("test", [$host=7.2.1.5], [$num=91]);
		SumStats::observe("test", [$host=10.10.10.10], [$num=5]);
		}
	}
# Exercises on-demand access to a single sumstat key: asks for the
# "test sumstat" result of host 7.2.1.5 via SumStats::request_key
# and prints the SUM value, then shuts the test down.
event on_demand2()
	{
	local host = 7.2.1.5;
	when ( local result = SumStats::request_key("test sumstat", [$host=host]) )
		{
		print "SumStat key request";
		# The "test" stream may be absent from the result if no
		# observations were recorded for this key.
		if ( "test" in result )
			print fmt("    Host: %s -> %.0f", host, result["test"]$sum);
		terminate();
		}
	}
event on_demand()
{
#when ( local results = SumStats::request("test sumstat") )
# {
# print "Complete SumStat request";
# print fmt(" Host: %s -> %.0f", 6.5.4.3, results[[$host=6.5.4.3]]["test"]$sum);
# print fmt(" Host: %s -> %.0f", 10.10.10.10, results[[$host=10.10.10.10]]["test"]$sum);
# print fmt(" Host: %s -> %.0f", 1.2.3.4, results[[$host=1.2.3.4]]["test"]$sum);
# print fmt(" Host: %s -> %.0f", 7.2.1.5, results[[$host=7.2.1.5]]["test"]$sum);
event on_demand2();
# }
}
global peer_count = 0;
event remote_connection_handshake_done(p: event_peer) &priority=-5
{
++peer_count;
if ( peer_count == 2 )
{
if ( Cluster::local_node_type() == Cluster::MANAGER )
event ready_for_data();
schedule 1sec { on_demand() };
}
}

View file

@ -0,0 +1,46 @@
# @TEST-EXEC: bro %INPUT
# @TEST-EXEC: btest-diff .stdout
redef exit_only_after_terminate=T;
## Requesting a full sumstats resulttable is not supported yet.
#event on_demand()
# {
# when ( local results = SumStats::request("test") )
# {
# print "Complete SumStat request";
# for ( key in results )
# {
# print fmt(" Host: %s -> %.0f", key$host, results[key]["test.reducer"]$sum);
# }
# }
# }
# Requests the current "test" sumstat result for a single key
# (host 1.2.3.4) and prints the SUM of the "test.reducer" stream,
# then terminates. Expected value is 42, seeded in bro_init below.
event on_demand_key()
	{
	local host = 1.2.3.4;
	when ( local result = SumStats::request_key("test", [$host=host]) )
		{
		print fmt("Key request for %s", host);
		print fmt("    Host: %s -> %.0f", host, result["test.reducer"]$sum);
		terminate();
		}
	}
# Sets up a minimal sumstat ("test" with a single SUM reducer and a long
# epoch), seeds two observations, and schedules the on-demand key request.
event bro_init() &priority=5
	{
	local r1: SumStats::Reducer = [$stream="test.reducer",
	                               $apply=set(SumStats::SUM)];

	# No $epoch_result/$epoch_finished callbacks on purpose: results are
	# only retrieved through the on-demand request API.
	SumStats::create([$name="test",
	                  $epoch=1hr,
	                  $reducers=set(r1)]);

	# Seed some data but notice there are no callbacks defined in the sumstat!
	SumStats::observe("test.reducer", [$host=1.2.3.4], [$num=42]);
	SumStats::observe("test.reducer", [$host=4.3.2.1], [$num=7]);

	#schedule 0.1 secs { on_demand() };
	schedule 1 secs { on_demand_key() };
	}

View file

@ -5,8 +5,7 @@
# @TEST-EXEC: btest-bg-run worker-1 BROPATH=$BROPATH:.. CLUSTER_NODE=worker-1 bro %INPUT
# @TEST-EXEC: btest-bg-run worker-2 BROPATH=$BROPATH:.. CLUSTER_NODE=worker-2 bro %INPUT
# @TEST-EXEC: btest-bg-wait 15
# @TEST-EXEC: cat manager-1/.stdout | sort > out
# @TEST-EXEC: btest-diff out
# @TEST-EXEC: btest-diff manager-1/.stdout
@TEST-START-FILE cluster-layout.bro
redef Cluster::nodes = {
@ -18,25 +17,24 @@ redef Cluster::nodes = {
redef Log::default_rotation_interval = 0secs;
global n = 0;
event bro_init() &priority=5
{
local r1: SumStats::Reducer = [$stream="test", $apply=set(SumStats::SAMPLE), $num_samples=5];
SumStats::create([$epoch=5secs,
SumStats::create([$name="test",
$epoch=5secs,
$reducers=set(r1),
$epoch_finished(rt: SumStats::ResultTable) =
$epoch_result(ts: time, key: SumStats::Key, result: SumStats::Result) =
{
for ( key in rt )
{
print key$host;
local r = rt[key]["test"];
for ( sample in r$samples ) {
print r$samples[sample];
}
print r$sample_elements;
}
local r = result["test"];
print fmt("Host: %s Sampled observations: %d", key$host, r$sample_elements);
local sample_nums: vector of count = vector();
for ( sample in r$samples )
sample_nums[|sample_nums|] =r$samples[sample]$num;
print fmt(" %s", sort(sample_nums));
},
$epoch_finished(ts: time) =
{
terminate();
}]);
}

View file

@ -5,19 +5,16 @@ event bro_init() &priority=5
{
local r1: SumStats::Reducer = [$stream="test.metric",
$apply=set(SumStats::SAMPLE), $num_samples=2];
SumStats::create([$epoch=3secs,
$reducers=set(r1),
$epoch_finished(data: SumStats::ResultTable) =
{
for ( key in data )
{
print key$host;
local r = data[key]["test.metric"];
print r$samples;
print r$sample_elements;
}
}
]);
SumStats::create([$name="test",
$epoch=3secs,
$reducers=set(r1),
$epoch_result(ts: time, key: SumStats::Key, result: SumStats::Result) =
{
print key$host;
local r = result["test.metric"];
print r$samples;
print r$sample_elements;
}]);
SumStats::observe("test.metric", [$host=1.2.3.4], [$num=5]);
SumStats::observe("test.metric", [$host=1.2.3.4], [$num=22]);

View file

@ -8,14 +8,15 @@ redef enum Notice::Type += {
event bro_init() &priority=5
{
local r1: SumStats::Reducer = [$stream="test.metric", $apply=set(SumStats::SUM)];
SumStats::create([$epoch=3secs,
SumStats::create([$name="test1",
$epoch=3secs,
$reducers=set(r1),
#$threshold_val = SumStats::sum_threshold("test.metric"),
$threshold_val(key: SumStats::Key, result: SumStats::Result) =
{
return double_to_count(result["test.metric"]$sum);
return result["test.metric"]$sum;
},
$threshold=5,
$threshold=5.0,
$threshold_crossed(key: SumStats::Key, result: SumStats::Result) =
{
local r = result["test.metric"];
@ -24,14 +25,15 @@ event bro_init() &priority=5
]);
local r2: SumStats::Reducer = [$stream="test.metric", $apply=set(SumStats::SUM)];
SumStats::create([$epoch=3secs,
SumStats::create([$name="test2",
$epoch=3secs,
$reducers=set(r2),
#$threshold_val = SumStats::sum_threshold("test.metric"),
$threshold_val(key: SumStats::Key, result: SumStats::Result) =
{
return double_to_count(result["test.metric"]$sum);
return result["test.metric"]$sum;
},
$threshold_series=vector(3,6,800),
$threshold_series=vector(3.0,6.0,800.0),
$threshold_crossed(key: SumStats::Key, result: SumStats::Result) =
{
local r = result["test.metric"];
@ -41,19 +43,20 @@ event bro_init() &priority=5
local r3: SumStats::Reducer = [$stream="test.metric", $apply=set(SumStats::SUM)];
local r4: SumStats::Reducer = [$stream="test.metric2", $apply=set(SumStats::SUM)];
SumStats::create([$epoch=3secs,
SumStats::create([$name="test3",
$epoch=3secs,
$reducers=set(r3, r4),
$threshold_val(key: SumStats::Key, result: SumStats::Result) =
{
# Calculate a ratio between sums of two reducers.
if ( "test.metric2" in result && "test.metric" in result &&
result["test.metric"]$sum > 0 )
return double_to_count(result["test.metric2"]$sum / result["test.metric"]$sum);
return result["test.metric2"]$sum / result["test.metric"]$sum;
else
return 0;
return 0.0;
},
# Looking for metric2 sum to be 5 times the sum of metric
$threshold=5,
$threshold=5.0,
$threshold_crossed(key: SumStats::Key, result: SumStats::Result) =
{
local thold = result["test.metric2"]$sum / result["test.metric"]$sum;

View file

@ -23,27 +23,24 @@ event bro_init() &priority=5
{
local r1: SumStats::Reducer = [$stream="test.metric",
$apply=set(SumStats::TOPK)];
SumStats::create([$epoch=5secs,
$reducers=set(r1),
$epoch_finished(data: SumStats::ResultTable) =
{
for ( key in data )
{
local r = data[key]["test.metric"];
local s: vector of SumStats::Observation;
s = topk_get_top(r$topk, 5);
print fmt("Top entries for key %s", key$str);
for ( element in s )
{
print fmt("Num: %d, count: %d, epsilon: %d", s[element]$num, topk_count(r$topk, s[element]), topk_epsilon(r$topk, s[element]));
}
terminate();
}
}
]);
SumStats::create([$name="topk-test",
$epoch=5secs,
$reducers=set(r1),
$epoch_result(ts: time, key: SumStats::Key, result: SumStats::Result) =
{
local r = result["test.metric"];
local s: vector of SumStats::Observation;
s = topk_get_top(r$topk, 5);
print fmt("Top entries for key %s", key$str);
for ( element in s )
{
print fmt("Num: %d, count: %d, epsilon: %d", s[element]$num, topk_count(r$topk, s[element]), topk_epsilon(r$topk, s[element]));
}
},
$epoch_finished(ts: time) =
{
terminate();
}]);
}

View file

@ -5,26 +5,21 @@ event bro_init() &priority=5
{
local r1: SumStats::Reducer = [$stream="test.metric",
$apply=set(SumStats::TOPK)];
SumStats::create([$epoch=3secs,
$reducers=set(r1),
$epoch_finished(data: SumStats::ResultTable) =
{
for ( key in data )
{
local r = data[key]["test.metric"];
SumStats::create([$name="topk-test",
$epoch=3secs,
$reducers=set(r1),
$epoch_result(ts: time, key: SumStats::Key, result: SumStats::Result) =
{
local r = result["test.metric"];
local s: vector of SumStats::Observation;
s = topk_get_top(r$topk, 5);
local s: vector of SumStats::Observation;
s = topk_get_top(r$topk, 5);
print fmt("Top entries for key %s", key$str);
for ( element in s )
{
print fmt("Num: %d, count: %d, epsilon: %d", s[element]$num, topk_count(r$topk, s[element]), topk_epsilon(r$topk, s[element]));
}
}
}
]);
print fmt("Top entries for key %s", key$str);
for ( element in s )
{
print fmt("Num: %d, count: %d, epsilon: %d", s[element]$num, topk_count(r$topk, s[element]), topk_epsilon(r$topk, s[element]));
}
}]);
const loop_v: vector of count = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100};

View file

@ -0,0 +1,6 @@
# This tests that DHCP leases are logged in dhcp.log
# The trace contains one message of each DHCP message type,
# but only one lease should show up in the log.
# @TEST-EXEC: bro -r $TRACES/dhcp/dhcp.trace %INPUT
# @TEST-EXEC: btest-diff dhcp.log

View file

@ -0,0 +1,5 @@
# DHCPINFORM leases are special-cased in the code.
# This tests that those leases are correctly logged.
# @TEST-EXEC: bro -r $TRACES/dhcp/dhcp_inform.trace %INPUT
# @TEST-EXEC: btest-diff dhcp.log

View file

@ -1,4 +1,3 @@
# @TEST-REQUIRES: which httpd
# @TEST-REQUIRES: which python
#
# @TEST-EXEC: btest-bg-run httpd python $SCRIPTS/httpd.py --max 1
@ -8,7 +7,7 @@
# @TEST-EXEC: btest-diff bro/.stdout
@load base/utils/active-http
@load base/frameworks/communication # let network-time run. otherwise there are no heartbeats...
redef exit_only_after_terminate = T;
event bro_init()

View file

@ -1,11 +1,11 @@
# @TEST-EXEC: btest-bg-run bro bro -b ../dirtest.bro
# @TEST-EXEC: btest-bg-wait 10
# @TEST-EXEC: btest-bg-wait 15
# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-sort btest-diff bro/.stdout
@TEST-START-FILE dirtest.bro
@load base/utils/dir
@load base/frameworks/communication # let network-time run. otherwise there are no heartbeats...
redef exit_only_after_terminate = T;
global c: count = 0;
@ -33,14 +33,20 @@ function new_file2(fname: string)
event change_things()
{
system("touch ../testdir/newone");
system("rm ../testdir/bye && touch ../testdir/bye");
system("rm ../testdir/bye");
}
event change_things2()
{
system("touch ../testdir/bye");
}
event bro_init()
{
Dir::monitor("../testdir", new_file1, .5sec);
Dir::monitor("../testdir", new_file2, 1sec);
schedule 1sec { change_things() };
schedule 3sec { change_things() };
schedule 6sec { change_things2() };
}
@TEST-END-FILE

View file

@ -1,11 +1,11 @@
# @TEST-EXEC: btest-bg-run bro bro -b ../exectest.bro
# @TEST-EXEC: btest-bg-wait 10
# @TEST-EXEC: btest-bg-wait 15
# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-sort btest-diff bro/.stdout
@TEST-START-FILE exectest.bro
@load base/utils/exec
@load base/frameworks/communication # let network-time run. otherwise there are no heartbeats...
redef exit_only_after_terminate = T;
global c: count = 0;
@ -14,7 +14,7 @@ function check_exit_condition()
{
c += 1;
if ( c == 4 )
if ( c == 3 )
terminate();
}
@ -32,7 +32,8 @@ event bro_init()
test_cmd("test1", [$cmd="bash ../somescript.sh",
$read_files=set("out1", "out2")]);
test_cmd("test2", [$cmd="bash ../nofiles.sh"]);
test_cmd("test3", [$cmd="bash ../suicide.sh"]);
# Not sure of a portable way to test signals yet.
#test_cmd("test3", [$cmd="bash ../suicide.sh"]);
test_cmd("test4", [$cmd="bash ../stdin.sh", $stdin="hibye"]);
}

View file

@ -0,0 +1,8 @@
# This tests that the known_devices log is created,
# that devices are logged by MAC address, and that
# the DHCP hostname is added, if available.
# @TEST-EXEC: bro -r $TRACES/dhcp/dhcp.trace -r $TRACES/dhcp/dhcp_inform.trace %INPUT
# @TEST-EXEC: btest-diff known_devices.log
@load policy/protocols/dhcp/known-devices-and-hostnames