Merge branch 'master' into topic/jsiwek/file-analysis

Conflicts:
	scripts/base/protocols/ftp/main.bro
	src/OpaqueVal.h
	testing/btest/Baseline/coverage.bare-load-baseline/canonified_loaded_scripts.log
	testing/btest/Baseline/coverage.default-load-baseline/canonified_loaded_scripts.log
Jon Siwek 2013-05-06 10:21:16 -05:00
commit ec50cad9db
90 changed files with 3004 additions and 1415 deletions

CHANGES

@@ -1,4 +1,57 @@
2.1-498 | 2013-05-03 17:44:08 -0700

  * Table lookups return copy of non-const &default vals. This
    prevents unintentional modifications to the &default value itself.
    Addresses #981. (Jon Siwek) (see the sketch after this excerpt)

2.1-496 | 2013-05-03 15:54:47 -0700

  * Fix memory leak and unnecessary allocations in OpaqueVal.
    Addresses #986. (Matthias Vallentin)

2.1-492 | 2013-05-02 12:46:26 -0700

  * Work-around for sumstats framework not propagating updates after
    intermediate check in cluster environments. (Bernhard Amann)

  * Always apply tcp_connection_attempt. Before this change it was
    only applied when a connection_attempt() event handler was
    defined. (Robin Sommer)

  * Fixing coverage.bare-mode-errors test. (Robin Sommer)

2.1-487 | 2013-05-01 18:03:22 -0700

  * Always apply tcp_connection_attempt timer, even if no
    connection_attempt() event handler is defined. (Robin Sommer)

2.1-486 | 2013-05-01 15:28:45 -0700

  * New framework for computing summary statistics in
    base/frameworks/sumstats. This replaces the metrics framework and
    comes with a number of applications built on top; see NEWS. More
    documentation to follow. (Seth Hall)

2.1-397 | 2013-04-29 21:19:00 -0700

  * Fixing memory leaks in CompHash implementation. Addresses #987.
    (Robin Sommer)

2.1-394 | 2013-04-27 15:02:31 -0700

  * Fixed a bug in the vulnerable software script and added a test.
    (Seth Hall)

  * Fix schedule statements used outside event handlers. Addresses
    #974. (Jon Siwek)

  * Fix record coercion for default inner record fields. Addresses
    #973. (Jon Siwek)

  * Add bytestring_to_count function to bro.bif. Addresses #968. (Yun
    Zheng Hu)

2.1-386 | 2013-03-22 12:41:50 -0700

  * Added reverse() function to strings.bif. (Yun Zheng Hu)
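
For illustration (not part of the commit), a minimal hypothetical sketch of the &default copy semantics described in 2.1-498; the table name and key are invented:

# Hypothetical illustration of the 2.1-498 &default fix.
global tags: table[addr] of set[string] &default=set();

event bro_init()
    {
    # Looking up a missing key yields the &default value. As of 2.1-498 a
    # copy is returned, so mutating it no longer alters the shared default.
    local s = tags[1.2.3.4];
    add s["example"];
    }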

@@ -1,144 +0,0 @@
# DO NOT EDIT
# This file is auto-generated from the genDocSourcesList.sh script.
#
# This is a list of Bro script sources for which to generate reST documentation.
# It will be included inline in the CMakeLists.txt found in the same directory
# in order to create Makefile targets that define how to generate reST from
# a given Bro script.
#
# Note: any path prefix of the script (2nd argument of rest_target macro)
# will be used to derive what path under scripts/ the generated documentation
# will be placed.
set(psd ${PROJECT_SOURCE_DIR}/scripts)
rest_target(${CMAKE_CURRENT_SOURCE_DIR} example.bro internal)
rest_target(${psd} base/init-default.bro internal)
rest_target(${psd} base/init-bare.bro internal)
rest_target(${CMAKE_BINARY_DIR}/src base/bro.bif.bro)
rest_target(${CMAKE_BINARY_DIR}/src base/const.bif.bro)
rest_target(${CMAKE_BINARY_DIR}/src base/event.bif.bro)
rest_target(${CMAKE_BINARY_DIR}/src base/logging.bif.bro)
rest_target(${CMAKE_BINARY_DIR}/src base/reporter.bif.bro)
rest_target(${CMAKE_BINARY_DIR}/src base/strings.bif.bro)
rest_target(${CMAKE_BINARY_DIR}/src base/types.bif.bro)
rest_target(${psd} base/frameworks/cluster/main.bro)
rest_target(${psd} base/frameworks/cluster/nodes/manager.bro)
rest_target(${psd} base/frameworks/cluster/nodes/proxy.bro)
rest_target(${psd} base/frameworks/cluster/nodes/worker.bro)
rest_target(${psd} base/frameworks/cluster/setup-connections.bro)
rest_target(${psd} base/frameworks/communication/main.bro)
rest_target(${psd} base/frameworks/control/main.bro)
rest_target(${psd} base/frameworks/dpd/main.bro)
rest_target(${psd} base/frameworks/intel/main.bro)
rest_target(${psd} base/frameworks/logging/main.bro)
rest_target(${psd} base/frameworks/logging/postprocessors/scp.bro)
rest_target(${psd} base/frameworks/logging/postprocessors/sftp.bro)
rest_target(${psd} base/frameworks/logging/writers/ascii.bro)
rest_target(${psd} base/frameworks/logging/writers/dataseries.bro)
rest_target(${psd} base/frameworks/metrics/cluster.bro)
rest_target(${psd} base/frameworks/metrics/main.bro)
rest_target(${psd} base/frameworks/metrics/non-cluster.bro)
rest_target(${psd} base/frameworks/notice/actions/add-geodata.bro)
rest_target(${psd} base/frameworks/notice/actions/drop.bro)
rest_target(${psd} base/frameworks/notice/actions/email_admin.bro)
rest_target(${psd} base/frameworks/notice/actions/page.bro)
rest_target(${psd} base/frameworks/notice/actions/pp-alarms.bro)
rest_target(${psd} base/frameworks/notice/cluster.bro)
rest_target(${psd} base/frameworks/notice/extend-email/hostnames.bro)
rest_target(${psd} base/frameworks/notice/main.bro)
rest_target(${psd} base/frameworks/notice/weird.bro)
rest_target(${psd} base/frameworks/packet-filter/main.bro)
rest_target(${psd} base/frameworks/packet-filter/netstats.bro)
rest_target(${psd} base/frameworks/reporter/main.bro)
rest_target(${psd} base/frameworks/signatures/main.bro)
rest_target(${psd} base/frameworks/software/main.bro)
rest_target(${psd} base/protocols/conn/contents.bro)
rest_target(${psd} base/protocols/conn/inactivity.bro)
rest_target(${psd} base/protocols/conn/main.bro)
rest_target(${psd} base/protocols/dns/consts.bro)
rest_target(${psd} base/protocols/dns/main.bro)
rest_target(${psd} base/protocols/ftp/file-extract.bro)
rest_target(${psd} base/protocols/ftp/main.bro)
rest_target(${psd} base/protocols/ftp/utils-commands.bro)
rest_target(${psd} base/protocols/http/file-extract.bro)
rest_target(${psd} base/protocols/http/file-hash.bro)
rest_target(${psd} base/protocols/http/file-ident.bro)
rest_target(${psd} base/protocols/http/main.bro)
rest_target(${psd} base/protocols/http/utils.bro)
rest_target(${psd} base/protocols/irc/dcc-send.bro)
rest_target(${psd} base/protocols/irc/main.bro)
rest_target(${psd} base/protocols/smtp/entities-excerpt.bro)
rest_target(${psd} base/protocols/smtp/entities.bro)
rest_target(${psd} base/protocols/smtp/main.bro)
rest_target(${psd} base/protocols/ssh/main.bro)
rest_target(${psd} base/protocols/ssl/consts.bro)
rest_target(${psd} base/protocols/ssl/main.bro)
rest_target(${psd} base/protocols/ssl/mozilla-ca-list.bro)
rest_target(${psd} base/protocols/syslog/consts.bro)
rest_target(${psd} base/protocols/syslog/main.bro)
rest_target(${psd} base/utils/addrs.bro)
rest_target(${psd} base/utils/conn-ids.bro)
rest_target(${psd} base/utils/directions-and-hosts.bro)
rest_target(${psd} base/utils/files.bro)
rest_target(${psd} base/utils/numbers.bro)
rest_target(${psd} base/utils/paths.bro)
rest_target(${psd} base/utils/patterns.bro)
rest_target(${psd} base/utils/site.bro)
rest_target(${psd} base/utils/strings.bro)
rest_target(${psd} base/utils/thresholds.bro)
rest_target(${psd} policy/frameworks/communication/listen.bro)
rest_target(${psd} policy/frameworks/control/controllee.bro)
rest_target(${psd} policy/frameworks/control/controller.bro)
rest_target(${psd} policy/frameworks/dpd/detect-protocols.bro)
rest_target(${psd} policy/frameworks/dpd/packet-segment-logging.bro)
rest_target(${psd} policy/frameworks/metrics/conn-example.bro)
rest_target(${psd} policy/frameworks/metrics/http-example.bro)
rest_target(${psd} policy/frameworks/metrics/ssl-example.bro)
rest_target(${psd} policy/frameworks/software/version-changes.bro)
rest_target(${psd} policy/frameworks/software/vulnerable.bro)
rest_target(${psd} policy/integration/barnyard2/main.bro)
rest_target(${psd} policy/integration/barnyard2/types.bro)
rest_target(${psd} policy/misc/analysis-groups.bro)
rest_target(${psd} policy/misc/capture-loss.bro)
rest_target(${psd} policy/misc/loaded-scripts.bro)
rest_target(${psd} policy/misc/profiling.bro)
rest_target(${psd} policy/misc/stats.bro)
rest_target(${psd} policy/misc/trim-trace-file.bro)
rest_target(${psd} policy/protocols/conn/known-hosts.bro)
rest_target(${psd} policy/protocols/conn/known-services.bro)
rest_target(${psd} policy/protocols/conn/weirds.bro)
rest_target(${psd} policy/protocols/dns/auth-addl.bro)
rest_target(${psd} policy/protocols/dns/detect-external-names.bro)
rest_target(${psd} policy/protocols/ftp/detect.bro)
rest_target(${psd} policy/protocols/ftp/software.bro)
rest_target(${psd} policy/protocols/http/detect-MHR.bro)
rest_target(${psd} policy/protocols/http/detect-intel.bro)
rest_target(${psd} policy/protocols/http/detect-sqli.bro)
rest_target(${psd} policy/protocols/http/detect-webapps.bro)
rest_target(${psd} policy/protocols/http/header-names.bro)
rest_target(${psd} policy/protocols/http/software-browser-plugins.bro)
rest_target(${psd} policy/protocols/http/software.bro)
rest_target(${psd} policy/protocols/http/var-extraction-cookies.bro)
rest_target(${psd} policy/protocols/http/var-extraction-uri.bro)
rest_target(${psd} policy/protocols/smtp/blocklists.bro)
rest_target(${psd} policy/protocols/smtp/detect-suspicious-orig.bro)
rest_target(${psd} policy/protocols/smtp/software.bro)
rest_target(${psd} policy/protocols/ssh/detect-bruteforcing.bro)
rest_target(${psd} policy/protocols/ssh/geo-data.bro)
rest_target(${psd} policy/protocols/ssh/interesting-hostnames.bro)
rest_target(${psd} policy/protocols/ssh/software.bro)
rest_target(${psd} policy/protocols/ssl/cert-hash.bro)
rest_target(${psd} policy/protocols/ssl/expiring-certs.bro)
rest_target(${psd} policy/protocols/ssl/extract-certs-pem.bro)
rest_target(${psd} policy/protocols/ssl/known-certs.bro)
rest_target(${psd} policy/protocols/ssl/validate-certs.bro)
rest_target(${psd} policy/tuning/defaults/packet-fragments.bro)
rest_target(${psd} policy/tuning/defaults/warnings.bro)
rest_target(${psd} policy/tuning/track-all-assets.bro)
rest_target(${psd} site/local-manager.bro)
rest_target(${psd} site/local-proxy.bro)
rest_target(${psd} site/local-worker.bro)
rest_target(${psd} site/local.bro)
rest_target(${psd} test-all-policy.bro)

NEWS

@@ -126,6 +126,9 @@ Changed Functionality
- Removed the byte_len() and length() bif functions. Use the "|...|"
  operator instead (sketched below).

- The SSH::Login notice has been superseded by a corresponding
  intelligence framework observation (SSH::SUCCESSFUL_LOGIN).
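
For reference (hypothetical value, not part of the commit), the replacement operator in use:

event bro_init()
    {
    local s = "hello";
    print |s|;    # prints 5; "|...|" replaces byte_len() and length()
    }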
Bro 2.1
-------
@@ -209,6 +212,27 @@ New Functionality
outputs. We do not yet recommend them for production (but welcome
feedback!)
- Summary statistics framework. [Extend]

- A number of new applications built on top of the summary statistics
  framework (a loading sketch follows this list):

  * Scan detection: Detectors for port and address scans return. See
    policy/misc/scan.bro.

  * Traceroute detector: policy/misc/detect-traceroute

  * Web application detection/measurement: policy/misc/app-metrics.bro

  * FTP brute-forcing detector: policy/protocols/ftp/detect-bruteforcing.bro

  * HTTP-based SQL injection detector: policy/protocols/http/detect-sqli.bro
    (existed before, but now ported to the new framework)

  * SSH brute-forcing detector feeding the intelligence framework:
    policy/protocols/ssh/detect-bruteforcing.bro
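
As a sketch (not part of the commit), these detectors can be enabled from a site policy such as local.bro; this assumes the standard BROPATH, under which scripts in policy/ load by relative name:

# Hypothetical local.bro excerpt enabling the new sumstats-based detectors.
@load misc/scan
@load misc/detect-traceroute
@load misc/app-metrics
@load protocols/ftp/detect-bruteforcing
@load protocols/http/detect-sqli
@load protocols/ssh/detect-bruteforcing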
Changed Functionality
~~~~~~~~~~~~~~~~~~~~~

VERSION

@@ -1 +1 @@
-2.1-386
+2.1-498

@@ -1 +1 @@
-Subproject commit 72d121ade5a37df83d3252646de51cb77ce69a89
+Subproject commit a4b8dd0b691c3f614537ad8471fc80a82ce7b2df

@@ -1 +1 @@
-Subproject commit 2b35d0331366865fbf0119919cc9692d55c4538c
+Subproject commit 786b83664c6a15faeb153d118310526b7790deae

@@ -49,9 +49,6 @@ rest_target(${psd} base/frameworks/logging/writers/ascii.bro)
rest_target(${psd} base/frameworks/logging/writers/dataseries.bro)
rest_target(${psd} base/frameworks/logging/writers/elasticsearch.bro)
rest_target(${psd} base/frameworks/logging/writers/none.bro)
rest_target(${psd} base/frameworks/metrics/cluster.bro)
rest_target(${psd} base/frameworks/metrics/main.bro)
rest_target(${psd} base/frameworks/metrics/non-cluster.bro)
rest_target(${psd} base/frameworks/notice/actions/add-geodata.bro)
rest_target(${psd} base/frameworks/notice/actions/drop.bro)
rest_target(${psd} base/frameworks/notice/actions/email_admin.bro)
@@ -67,6 +64,17 @@ rest_target(${psd} base/frameworks/packet-filter/netstats.bro)
rest_target(${psd} base/frameworks/reporter/main.bro)
rest_target(${psd} base/frameworks/signatures/main.bro)
rest_target(${psd} base/frameworks/software/main.bro)
rest_target(${psd} base/frameworks/sumstats/cluster.bro)
rest_target(${psd} base/frameworks/sumstats/main.bro)
rest_target(${psd} base/frameworks/sumstats/non-cluster.bro)
rest_target(${psd} base/frameworks/sumstats/plugins/average.bro)
rest_target(${psd} base/frameworks/sumstats/plugins/max.bro)
rest_target(${psd} base/frameworks/sumstats/plugins/min.bro)
rest_target(${psd} base/frameworks/sumstats/plugins/sample.bro)
rest_target(${psd} base/frameworks/sumstats/plugins/std-dev.bro)
rest_target(${psd} base/frameworks/sumstats/plugins/sum.bro)
rest_target(${psd} base/frameworks/sumstats/plugins/unique.bro)
rest_target(${psd} base/frameworks/sumstats/plugins/variance.bro)
rest_target(${psd} base/frameworks/tunnels/main.bro)
rest_target(${psd} base/misc/find-checksum-offloading.bro)
rest_target(${psd} base/protocols/conn/contents.bro)
@@ -110,9 +118,11 @@ rest_target(${psd} base/utils/files.bro)
rest_target(${psd} base/utils/numbers.bro)
rest_target(${psd} base/utils/paths.bro)
rest_target(${psd} base/utils/patterns.bro)
rest_target(${psd} base/utils/queue.bro)
rest_target(${psd} base/utils/site.bro)
rest_target(${psd} base/utils/strings.bro)
rest_target(${psd} base/utils/thresholds.bro)
rest_target(${psd} base/utils/time.bro)
rest_target(${psd} base/utils/urls.bro)
rest_target(${psd} policy/frameworks/communication/listen.bro)
rest_target(${psd} policy/frameworks/control/controllee.bro)
@@ -128,18 +138,18 @@ rest_target(${psd} policy/frameworks/intel/smtp-url-extraction.bro)
rest_target(${psd} policy/frameworks/intel/smtp.bro)
rest_target(${psd} policy/frameworks/intel/ssl.bro)
rest_target(${psd} policy/frameworks/intel/where-locations.bro)
rest_target(${psd} policy/frameworks/metrics/conn-example.bro)
rest_target(${psd} policy/frameworks/metrics/http-example.bro)
rest_target(${psd} policy/frameworks/metrics/ssl-example.bro)
rest_target(${psd} policy/frameworks/software/version-changes.bro)
rest_target(${psd} policy/frameworks/software/vulnerable.bro)
rest_target(${psd} policy/integration/barnyard2/main.bro)
rest_target(${psd} policy/integration/barnyard2/types.bro)
rest_target(${psd} policy/integration/collective-intel/main.bro)
rest_target(${psd} policy/misc/analysis-groups.bro)
rest_target(${psd} policy/misc/app-metrics.bro)
rest_target(${psd} policy/misc/capture-loss.bro)
rest_target(${psd} policy/misc/detect-traceroute/main.bro)
rest_target(${psd} policy/misc/loaded-scripts.bro)
rest_target(${psd} policy/misc/profiling.bro)
rest_target(${psd} policy/misc/scan.bro)
rest_target(${psd} policy/misc/stats.bro)
rest_target(${psd} policy/misc/trim-trace-file.bro)
rest_target(${psd} policy/protocols/conn/known-hosts.bro)
@@ -147,6 +157,7 @@ rest_target(${psd} policy/protocols/conn/known-services.bro)
rest_target(${psd} policy/protocols/conn/weirds.bro)
rest_target(${psd} policy/protocols/dns/auth-addl.bro)
rest_target(${psd} policy/protocols/dns/detect-external-names.bro)
rest_target(${psd} policy/protocols/ftp/detect-bruteforcing.bro)
rest_target(${psd} policy/protocols/ftp/detect.bro)
rest_target(${psd} policy/protocols/ftp/software.bro)
rest_target(${psd} policy/protocols/http/detect-MHR.bro)

@@ -1,264 +0,0 @@
##! This implements transparent cluster support for the metrics framework.
##! Do not load this file directly. It's only meant to be loaded automatically
##! and will be, depending on whether the cluster framework has been enabled.
##! The goal of this script is to make metric calculation completely and
##! transparently automated when running on a cluster.
##!
##! Events defined here are not exported deliberately because they are meant
##! to be an internal implementation detail.
@load base/frameworks/cluster
@load ./main
module Metrics;
export {
## Allows a user to decide how large the result groups are that
## workers transmit for cluster metric aggregation.
const cluster_send_in_groups_of = 50 &redef;
## The percent of the full threshold value that needs to be met
## on a single worker for that worker to send the value to its manager in
## order for it to request a global view for that value. There is no
## requirement that the manager requests a global view for the index
## since it may opt not to if it requested a global view for the index
## recently.
const cluster_request_global_view_percent = 0.1 &redef;
## Event sent by the manager in a cluster to initiate the
## collection of metrics values for a filter.
global cluster_filter_request: event(uid: string, id: ID, filter_name: string);
## Event sent by nodes that are collecting metrics after receiving
## a request for the metric filter from the manager.
global cluster_filter_response: event(uid: string, id: ID, filter_name: string, data: MetricTable, done: bool);
## This event is sent by the manager in a cluster to initiate the
## collection of a single index value from a filter. It's typically
## used to get intermediate updates before the break interval triggers
## to speed detection of a value crossing a threshold.
global cluster_index_request: event(uid: string, id: ID, filter_name: string, index: Index);
## This event is sent by nodes in response to a
## :bro:id:`Metrics::cluster_index_request` event.
global cluster_index_response: event(uid: string, id: ID, filter_name: string, index: Index, val: count);
## This is sent by workers to indicate that they crossed the percentage
## of the current threshold defined globally in
## :bro:id:`Metrics::cluster_request_global_view_percent`.
global cluster_index_intermediate_response: event(id: Metrics::ID, filter_name: string, index: Metrics::Index, val: count);
## This event is scheduled internally on workers to send result chunks.
global send_data: event(uid: string, id: ID, filter_name: string, data: MetricTable);
}
# This is maintained by managers so they can know what data they requested and
# when they requested it.
global requested_results: table[string] of time = table() &create_expire=5mins;
# TODO: The next 4 variables make the assumption that a value never
# takes longer than 5 minutes to transmit from workers to manager. This needs to
# be tunable or self-tuning. These should also be restructured to be
# maintained within a single variable.
# This variable is maintained by manager nodes as they collect and aggregate
# results.
global filter_results: table[string, ID, string] of MetricTable &create_expire=5mins;
# This variable is maintained by manager nodes to track how many "dones" they
# collected per collection unique id. Once the number of results for a uid
# matches the number of peer nodes that results should be coming from, the
# result is written out and deleted from here.
# TODO: add an &expire_func in case not all results are received.
global done_with: table[string] of count &create_expire=5mins &default=0;
# This variable is maintained by managers to track intermediate responses as
# they are getting a global view for a certain index.
global index_requests: table[string, ID, string, Index] of count &create_expire=5mins &default=0;
# This variable is maintained by all hosts for different purposes. Non-managers
# maintain it to know what indexes they have recently sent as intermediate
# updates so they don't overwhelm their manager. Managers maintain it so they
# don't overwhelm workers with intermediate index requests. The count that is
# yielded is the number of times the percentage threshold has been crossed and
# an intermediate result has been received. The manager may optionally request
# the index again before data expires from here if too many workers are crossing
# the percentage threshold (not implemented yet!).
global recent_global_view_indexes: table[ID, string, Index] of count &create_expire=5mins &default=0;
# Add events to the cluster framework to make this work.
redef Cluster::manager2worker_events += /Metrics::cluster_(filter_request|index_request)/;
redef Cluster::worker2manager_events += /Metrics::cluster_(filter_response|index_response|index_intermediate_response)/;
@if ( Cluster::local_node_type() != Cluster::MANAGER )
# This is done on all non-manager node types in the event that a metric is
# being collected somewhere other than a worker.
function data_added(filter: Filter, index: Index, val: count)
{
# If an intermediate update for this value was sent recently, don't send
# it again.
if ( [filter$id, filter$name, index] in recent_global_view_indexes )
return;
# If val is 5 and global view % is 0.1 (10%), pct_val will be 50. If that
# crosses the full threshold then it's a candidate to send as an
# intermediate update.
local pct_val = double_to_count(val / cluster_request_global_view_percent);
if ( check_notice(filter, index, pct_val) )
{
# kick off intermediate update
event Metrics::cluster_index_intermediate_response(filter$id, filter$name, index, val);
++recent_global_view_indexes[filter$id, filter$name, index];
}
}
event Metrics::send_data(uid: string, id: ID, filter_name: string, data: MetricTable)
{
#print fmt("WORKER %s: sending data for uid %s...", Cluster::node, uid);
local local_data: MetricTable;
local num_added = 0;
for ( index in data )
{
local_data[index] = data[index];
delete data[index];
# Only send cluster_send_in_groups_of at a time. Queue another
# event to send the next group.
if ( cluster_send_in_groups_of == ++num_added )
break;
}
local done = F;
# If data is empty, this metric is done.
if ( |data| == 0 )
done = T;
event Metrics::cluster_filter_response(uid, id, filter_name, local_data, done);
if ( ! done )
event Metrics::send_data(uid, id, filter_name, data);
}
event Metrics::cluster_filter_request(uid: string, id: ID, filter_name: string)
{
#print fmt("WORKER %s: received the cluster_filter_request event.", Cluster::node);
# Initiate sending all of the data for the requested filter.
event Metrics::send_data(uid, id, filter_name, store[id, filter_name]);
# Look up the actual filter and reset it; the reference to the data
# currently stored will be maintained internally by the send_data event.
reset(filter_store[id, filter_name]);
}
event Metrics::cluster_index_request(uid: string, id: ID, filter_name: string, index: Index)
{
local val=0;
if ( index in store[id, filter_name] )
val = store[id, filter_name][index];
# fmt("WORKER %s: received the cluster_index_request event for %s=%d.", Cluster::node, index2str(index), val);
event Metrics::cluster_index_response(uid, id, filter_name, index, val);
}
@endif
@if ( Cluster::local_node_type() == Cluster::MANAGER )
# Managers handle logging.
event Metrics::log_it(filter: Filter)
{
#print fmt("%.6f MANAGER: breaking %s filter for %s metric", network_time(), filter$name, filter$id);
local uid = unique_id("");
# Set some tracking variables.
requested_results[uid] = network_time();
filter_results[uid, filter$id, filter$name] = table();
# Request data from peers.
event Metrics::cluster_filter_request(uid, filter$id, filter$name);
# Schedule the log_it event for the next break period.
schedule filter$break_interval { Metrics::log_it(filter) };
}
# This is unlikely to be called often, but it's here in case there are metrics
# being collected by managers.
function data_added(filter: Filter, index: Index, val: count)
{
if ( check_notice(filter, index, val) )
do_notice(filter, index, val);
}
event Metrics::cluster_index_response(uid: string, id: ID, filter_name: string, index: Index, val: count)
{
#print fmt("%0.6f MANAGER: receiving index data from %s", network_time(), get_event_peer()$descr);
if ( [uid, id, filter_name, index] !in index_requests )
index_requests[uid, id, filter_name, index] = 0;
index_requests[uid, id, filter_name, index] += val;
local ir = index_requests[uid, id, filter_name, index];
++done_with[uid];
if ( Cluster::worker_count == done_with[uid] )
{
if ( check_notice(filter_store[id, filter_name], index, ir) )
do_notice(filter_store[id, filter_name], index, ir);
delete done_with[uid];
delete index_requests[uid, id, filter_name, index];
}
}
# Managers handle intermediate updates here.
event Metrics::cluster_index_intermediate_response(id: ID, filter_name: string, index: Index, val: count)
{
#print fmt("MANAGER: receiving intermediate index data from %s", get_event_peer()$descr);
#print fmt("MANAGER: requesting index data for %s", index2str(index));
local uid = unique_id("");
event Metrics::cluster_index_request(uid, id, filter_name, index);
++recent_global_view_indexes[id, filter_name, index];
}
event Metrics::cluster_filter_response(uid: string, id: ID, filter_name: string, data: MetricTable, done: bool)
{
#print fmt("MANAGER: receiving results from %s", get_event_peer()$descr);
local local_data = filter_results[uid, id, filter_name];
for ( index in data )
{
if ( index !in local_data )
local_data[index] = 0;
local_data[index] += data[index];
}
# Mark another worker as being "done" for this uid.
if ( done )
++done_with[uid];
# If the data has been collected from all peers, we are done and ready to log.
if ( Cluster::worker_count == done_with[uid] )
{
local ts = network_time();
# Log the time this was initially requested if it's available.
if ( uid in requested_results )
{
ts = requested_results[uid];
delete requested_results[uid];
}
write_log(ts, filter_store[id, filter_name], local_data);
# Clean up
delete filter_results[uid, id, filter_name];
delete done_with[uid];
}
}
@endif

@@ -1,320 +0,0 @@
##! The metrics framework provides a way to count and measure data.
@load base/frameworks/notice
module Metrics;
export {
## The metrics logging stream identifier.
redef enum Log::ID += { LOG };
## Identifiers for metrics to collect.
type ID: enum {
## Blank placeholder value.
NOTHING,
};
## The default interval used for "breaking" metrics and writing the
## current value to the logging stream.
const default_break_interval = 15mins &redef;
## This is the interval for how often threshold-based notices will be
## raised again after they have already fired.
const renotice_interval = 1hr &redef;
## Represents a thing which is having metrics collected for it. An instance
## of this record type and a :bro:type:`Metrics::ID` together represent a
## single measurement.
type Index: record {
## Host is the value to which this metric applies.
host: addr &optional;
## A non-address related metric or a sub-key for an address based metric.
## An example might be successful SSH connections by client IP address
## where the client string would be the index value.
## Another example might be number of HTTP requests to a particular
## value in a Host header. This is an example of a non-host based
## metric since multiple IP addresses could respond for the same Host
## header value.
str: string &optional;
## The CIDR block that this metric applies to. This is typically
## only used internally for host based aggregation.
network: subnet &optional;
} &log;
## The record type that is used for logging metrics.
type Info: record {
## Timestamp at which the metric was "broken".
ts: time &log;
## What measurement the metric represents.
metric_id: ID &log;
## The name of the filter being logged. :bro:type:`Metrics::ID` values
## can have multiple filters which represent different perspectives on
## the data so this is necessary to understand the value.
filter_name: string &log;
## What the metric value applies to.
index: Index &log;
## The simple numeric value of the metric.
value: count &log;
};
# TODO: configure a metrics filter logging stream to log the current
# metrics configuration in case someone is looking through
# old logs and the configuration has changed since then.
## Filters define how the data from a metric is aggregated and handled.
## Filters can be used to set how often the measurements are cut or "broken"
## and logged or how the data within them is aggregated. It's also
## possible to disable logging and use filters for thresholding.
type Filter: record {
## The :bro:type:`Metrics::ID` that this filter applies to.
id: ID &optional;
## The name for this filter so that multiple filters can be
## applied to a single metric to get a different view of the same
## metric data being collected (different aggregation, break, etc.).
name: string &default="default";
## A predicate so that you can decide per index if you would like
## to accept the data being inserted.
pred: function(index: Index): bool &optional;
## Global mask by which you'd like to aggregate traffic.
aggregation_mask: count &optional;
## This is essentially a mapping table between addresses and subnets.
aggregation_table: table[subnet] of subnet &optional;
## The interval at which this filter should be "broken" and written
## to the logging stream. The counters are also reset to zero at
## this time so any threshold based detection needs to be set to a
## number that should be expected to happen within this period.
break_interval: interval &default=default_break_interval;
## This determines if the result of this filter is sent to the metrics
## logging stream. One use for the metrics framework is as an internal
## thresholding and statistics gathering utility that is meant to
## never log but rather to generate notices and derive data.
log: bool &default=T;
## If this and a $notice_threshold value are set, this notice type
## will be generated by the metrics framework.
note: Notice::Type &optional;
## A straight threshold for generating a notice.
notice_threshold: count &optional;
## A series of thresholds at which to generate notices.
notice_thresholds: vector of count &optional;
## How often this notice should be raised for this filter. It
## will be generated every time it crosses a threshold, but if the
## $break_interval is set to 5mins and this is set to 1hr the notice
## will only be generated once per hour even if something crosses the
## threshold in every break interval.
notice_freq: interval &optional;
};
## Function to associate a metric filter with a metric ID.
##
## id: The metric ID that the filter should be associated with.
##
## filter: The record representing the filter configuration.
global add_filter: function(id: ID, filter: Filter);
## Add data into a :bro:type:`Metrics::ID`. This should be called when
## a script has measured some point value and is ready to increment the
## counters.
##
## id: The metric ID that the data represents.
##
## index: The metric index that the value is to be added to.
##
## increment: How much to increment the counter by.
global add_data: function(id: ID, index: Index, increment: count);
## Helper function to represent a :bro:type:`Metrics::Index` value as
## a simple string.
##
## index: The metric index that is to be converted into a string.
##
## Returns: A string representation of the metric index.
global index2str: function(index: Index): string;
## Event that is used to "finish" metrics and adapt the metrics
## framework for clustered or non-clustered usage.
##
## .. note:: This is primarily intended for internal use.
global log_it: event(filter: Filter);
## Event to access metrics records as they are passed to the logging framework.
global log_metrics: event(rec: Info);
## Type to store a table of metrics values. Internal use only!
type MetricTable: table[Index] of count &default=0;
}
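
For context, a hedged sketch of how this (now removed) API was driven, based only on the declarations above; the HTTP_REQUESTS identifier and the event handler are invented for illustration:

# Illustrative use of the removed Metrics API; HTTP_REQUESTS is a
# hypothetical addition to Metrics::ID.
redef enum Metrics::ID += { HTTP_REQUESTS };

event bro_init()
    {
    # Break (log and reset) the counters every 5 minutes.
    Metrics::add_filter(HTTP_REQUESTS, [$name="default", $break_interval=5mins]);
    }

event http_request(c: connection, method: string, original_URI: string,
                   unescaped_URI: string, version: string)
    {
    # Count one request against the originating host.
    Metrics::add_data(HTTP_REQUESTS, [$host=c$id$orig_h], 1);
    }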
redef record Notice::Info += {
metric_index: Index &log &optional;
};
global metric_filters: table[ID] of vector of Filter = table();
global filter_store: table[ID, string] of Filter = table();
# This is indexed by metric ID and stream filter name.
global store: table[ID, string] of MetricTable = table() &default=table();
# This function checks if a threshold has been crossed and generates a
# notice if it has. It is also used as a method to implement
# mid-break-interval threshold crossing detection for cluster deployments.
global check_notice: function(filter: Filter, index: Index, val: count): bool;
# This is a hook for watching thresholds being crossed. It is called whenever
# index values are updated and the new val is given as the `val` argument.
global data_added: function(filter: Filter, index: Index, val: count);
# This stores the current threshold index for filters using the
# $notice_threshold and $notice_thresholds elements.
global thresholds: table[ID, string, Index] of count = {} &create_expire=renotice_interval &default=0;
event bro_init() &priority=5
{
Log::create_stream(Metrics::LOG, [$columns=Info, $ev=log_metrics]);
}
function index2str(index: Index): string
{
local out = "";
if ( index?$host )
out = fmt("%shost=%s", out, index$host);
if ( index?$network )
out = fmt("%s%snetwork=%s", out, |out|==0 ? "" : ", ", index$network);
if ( index?$str )
out = fmt("%s%sstr=%s", out, |out|==0 ? "" : ", ", index$str);
return fmt("metric_index(%s)", out);
}
function write_log(ts: time, filter: Filter, data: MetricTable)
{
for ( index in data )
{
local val = data[index];
local m: Info = [$ts=ts,
$metric_id=filter$id,
$filter_name=filter$name,
$index=index,
$value=val];
if ( filter$log )
Log::write(Metrics::LOG, m);
}
}
function reset(filter: Filter)
{
store[filter$id, filter$name] = table();
}
function add_filter(id: ID, filter: Filter)
{
if ( filter?$aggregation_table && filter?$aggregation_mask )
{
print "INVALID Metric filter: Defined $aggregation_table and $aggregation_mask.";
return;
}
if ( [id, filter$name] in store )
{
print fmt("INVALID Metric filter: Filter with name \"%s\" already exists.", filter$name);
return;
}
if ( filter?$notice_threshold && filter?$notice_thresholds )
{
print "INVALID Metric filter: Defined both $notice_threshold and $notice_thresholds";
return;
}
if ( ! filter?$id )
filter$id = id;
if ( id !in metric_filters )
metric_filters[id] = vector();
metric_filters[id][|metric_filters[id]|] = filter;
filter_store[id, filter$name] = filter;
store[id, filter$name] = table();
schedule filter$break_interval { Metrics::log_it(filter) };
}
function add_data(id: ID, index: Index, increment: count)
{
if ( id !in metric_filters )
return;
local filters = metric_filters[id];
# Try to add the data to all of the defined filters for the metric.
for ( filter_id in filters )
{
local filter = filters[filter_id];
# If this filter has a predicate, run the predicate and skip this
# index if the predicate returns false.
if ( filter?$pred && ! filter$pred(index) )
next;
if ( index?$host )
{
if ( filter?$aggregation_mask )
{
index$network = mask_addr(index$host, filter$aggregation_mask);
delete index$host;
}
else if ( filter?$aggregation_table )
{
# Don't add the data if the aggregation table doesn't include
# the given host address.
if ( index$host !in filter$aggregation_table )
return;
index$network = filter$aggregation_table[index$host];
delete index$host;
}
}
local metric_tbl = store[id, filter$name];
if ( index !in metric_tbl )
metric_tbl[index] = 0;
metric_tbl[index] += increment;
data_added(filter, index, metric_tbl[index]);
}
}
function check_notice(filter: Filter, index: Index, val: count): bool
{
if ( (filter?$notice_threshold &&
[filter$id, filter$name, index] !in thresholds &&
val >= filter$notice_threshold) ||
(filter?$notice_thresholds &&
|filter$notice_thresholds| <= thresholds[filter$id, filter$name, index] &&
val >= filter$notice_thresholds[thresholds[filter$id, filter$name, index]]) )
return T;
else
return F;
}
function do_notice(filter: Filter, index: Index, val: count)
{
# We include $peer_descr here because a manager could have actually
# generated the notice even though the current remote peer for the event
# calling this could be a worker if this is running as a cluster.
local n: Notice::Info = [$note=filter$note,
$n=val,
$metric_index=index,
$peer_descr=peer_description];
n$msg = fmt("Threshold crossed by %s %d/%d", index2str(index), val, filter$notice_threshold);
if ( index?$str )
n$sub = index$str;
if ( index?$host )
n$src = index$host;
# TODO: not sure where to put the network yet.
NOTICE(n);
# This just needs to be set to some value so that the notice isn't
# refired until it expires from the table or it crosses the next
# threshold in the case of vectors of thresholds.
++thresholds[filter$id, filter$name, index];
}

@@ -1,21 +0,0 @@
@load ./main
module Metrics;
event Metrics::log_it(filter: Filter)
{
local id = filter$id;
local name = filter$name;
write_log(network_time(), filter, store[id, name]);
reset(filter);
schedule filter$break_interval { Metrics::log_it(filter) };
}
function data_added(filter: Filter, index: Index, val: count)
{
if ( check_notice(filter, index, val) )
do_notice(filter, index, val);
}

@@ -1,4 +1,5 @@
@load ./main
@load ./plugins
# The cluster framework must be loaded first.
@load base/frameworks/cluster

@@ -0,0 +1,346 @@
##! This implements transparent cluster support for the SumStats framework.
##! Do not load this file directly. It's only meant to be loaded automatically
##! and will be, depending on whether the cluster framework has been enabled.
##! The goal of this script is to make sumstats calculation completely and
##! transparently automated when running on a cluster.
@load base/frameworks/cluster
@load ./main
module SumStats;
export {
## Allows a user to decide how large the result groups are that workers
## transmit for cluster stats aggregation.
const cluster_send_in_groups_of = 50 &redef;
## The percent of the full threshold value that needs to be met on a single worker
## for that worker to send the value to its manager in order for it to request a
## global view for that value. There is no requirement that the manager requests
## a global view for the key since it may opt not to if it requested a global view
## for the key recently.
const cluster_request_global_view_percent = 0.2 &redef;
## This is to deal with intermediate update overload. A manager will only allow
## this many intermediate update requests to the workers to be in flight at any
## given time. Intermediate updates requested beyond this limit are currently
## thrown out and not performed. In practice this should hopefully have a minimal effect.
const max_outstanding_global_views = 10 &redef;
## Intermediate updates can cause overload situations on very large clusters. This
## option may help reduce load and correct intermittent problems. The option
## itself is also meant to be temporary.
const enable_intermediate_updates = T &redef;
## Event sent by the manager in a cluster to initiate the collection of values for
## a sumstat.
global cluster_ss_request: event(uid: string, ssid: string);
## Event sent by nodes that are collecting sumstats after receiving a request for
## the sumstat from the manager.
global cluster_ss_response: event(uid: string, ssid: string, data: ResultTable, done: bool);
## This event is sent by the manager in a cluster to initiate the collection of
## a single key value from a sumstat. It's typically used to get intermediate
## updates before the break interval triggers to speed detection of a value
## crossing a threshold.
global cluster_key_request: event(uid: string, ssid: string, key: Key);
## This event is sent by nodes in response to a
## :bro:id:`SumStats::cluster_key_request` event.
global cluster_key_response: event(uid: string, ssid: string, key: Key, result: Result);
## This is sent by workers to indicate that they crossed the percentage
## of the current threshold defined globally in
## :bro:id:`SumStats::cluster_request_global_view_percent`.
global cluster_key_intermediate_response: event(ssid: string, key: SumStats::Key);
## This event is scheduled internally on workers to send result chunks.
global send_data: event(uid: string, ssid: string, data: ResultTable);
## This event is generated when a threshold is crossed.
global cluster_threshold_crossed: event(ssid: string, key: SumStats::Key, thold: Thresholding);
}
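
The constants above are all &redef; a minimal sketch of tuning them from a site script (values are arbitrary, for illustration only):

# Hypothetical tuning for a large cluster; the values are illustrative.
redef SumStats::cluster_send_in_groups_of = 100;
redef SumStats::cluster_request_global_view_percent = 0.5;
redef SumStats::max_outstanding_global_views = 5;
redef SumStats::enable_intermediate_updates = F;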
# Add events to the cluster framework to make this work.
redef Cluster::manager2worker_events += /SumStats::cluster_(ss_request|key_request|threshold_crossed)/;
redef Cluster::manager2worker_events += /SumStats::thresholds_reset/;
redef Cluster::worker2manager_events += /SumStats::cluster_(ss_response|key_response|key_intermediate_response)/;
@if ( Cluster::local_node_type() != Cluster::MANAGER )
# This variable is maintained to know what keys have recently been sent as
# intermediate updates so they don't overwhelm their manager. The count that is
# yielded is the number of times the percentage threshold has been crossed and
# an intermediate result has been received.
global recent_global_view_keys: table[string, Key] of count &create_expire=1min &default=0;
event bro_init() &priority=-100
{
# The manager is the only host allowed to track these.
stats_store = table();
reducer_store = table();
}
# This is done on all non-manager node types in the event that a sumstat is
# being collected somewhere other than a worker.
function data_added(ss: SumStat, key: Key, result: Result)
{
# If an intermediate update for this value was sent recently, don't send
# it again.
if ( [ss$id, key] in recent_global_view_keys )
return;
# If the result for this key crosses the configured percentage of the
# full threshold, it's a candidate to send as an
# intermediate update.
if ( enable_intermediate_updates &&
check_thresholds(ss, key, result, cluster_request_global_view_percent) )
{
# kick off intermediate update
event SumStats::cluster_key_intermediate_response(ss$id, key);
++recent_global_view_keys[ss$id, key];
}
}
event SumStats::send_data(uid: string, ssid: string, data: ResultTable)
{
#print fmt("WORKER %s: sending data for uid %s...", Cluster::node, uid);
local local_data: ResultTable = table();
local num_added = 0;
for ( key in data )
{
local_data[key] = data[key];
delete data[key];
# Only send cluster_send_in_groups_of at a time. Queue another
# event to send the next group.
if ( cluster_send_in_groups_of == ++num_added )
break;
}
local done = F;
# If data is empty, this sumstat is done.
if ( |data| == 0 )
done = T;
# Note: copy is needed to compensate for a serialization caching issue. This
# should be changed to something else later.
event SumStats::cluster_ss_response(uid, ssid, copy(local_data), done);
if ( ! done )
schedule 0.01 sec { SumStats::send_data(uid, ssid, data) };
}
event SumStats::cluster_ss_request(uid: string, ssid: string)
{
#print fmt("WORKER %s: received the cluster_ss_request event for %s.", Cluster::node, id);
# Initiate sending all of the data for the requested stats.
if ( ssid in result_store )
event SumStats::send_data(uid, ssid, result_store[ssid]);
else
event SumStats::send_data(uid, ssid, table());
# Look up the actual sumstat and reset it; the reference to the data
# currently stored will be maintained internally by the send_data event.
if ( ssid in stats_store )
reset(stats_store[ssid]);
}
event SumStats::cluster_key_request(uid: string, ssid: string, key: Key)
{
if ( ssid in result_store && key in result_store[ssid] )
{
#print fmt("WORKER %s: received the cluster_key_request event for %s=%s.", Cluster::node, key2str(key), data);
# Note: copy is needed to compensate for a serialization caching issue. This
# should be changed to something else later.
event SumStats::cluster_key_response(uid, ssid, key, copy(result_store[ssid][key]));
}
else
{
# We need to send an empty response if we don't have the data so that the manager
# can know that it heard back from all of the workers.
event SumStats::cluster_key_response(uid, ssid, key, table());
}
}
event SumStats::cluster_threshold_crossed(ssid: string, key: SumStats::Key, thold: Thresholding)
{
if ( ssid !in threshold_tracker )
threshold_tracker[ssid] = table();
threshold_tracker[ssid][key] = thold;
}
event SumStats::thresholds_reset(ssid: string)
{
threshold_tracker[ssid] = table();
}
@endif
@if ( Cluster::local_node_type() == Cluster::MANAGER )
# This variable is maintained by manager nodes as they collect and aggregate
# results.
# Index on a uid.
global stats_results: table[string] of ResultTable &read_expire=1min;
# This variable is maintained by manager nodes to track how many "dones" they
# collected per collection unique id. Once the number of results for a uid
# matches the number of peer nodes that results should be coming from, the
# result is written out and deleted from here.
# Indexed on a uid.
# TODO: add an &expire_func in case not all results are received.
global done_with: table[string] of count &read_expire=1min &default=0;
# This variable is maintained by managers to track intermediate responses as
# they are getting a global view for a certain key.
# Indexed on a uid.
global key_requests: table[string] of Result &read_expire=1min;
# This variable is maintained by managers to prevent overwhelming communication due
# to too many intermediate updates. Each sumstat is tracked separately so that
# one won't overwhelm and degrade other quieter sumstats.
# Indexed on a sumstat id.
global outstanding_global_views: table[string] of count &default=0;
const zero_time = double_to_time(0.0);
# Managers handle logging.
event SumStats::finish_epoch(ss: SumStat)
{
if ( network_time() > zero_time )
{
#print fmt("%.6f MANAGER: breaking %s sumstat for %s sumstat", network_time(), ss$name, ss$id);
local uid = unique_id("");
if ( uid in stats_results )
delete stats_results[uid];
stats_results[uid] = table();
# Request data from peers.
event SumStats::cluster_ss_request(uid, ss$id);
}
# Schedule the next finish_epoch event.
schedule ss$epoch { SumStats::finish_epoch(ss) };
}
# This is unlikely to be called often, but it's here in
# case there are sumstats being collected by managers.
function data_added(ss: SumStat, key: Key, result: Result)
{
if ( check_thresholds(ss, key, result, 1.0) )
{
threshold_crossed(ss, key, result);
event SumStats::cluster_threshold_crossed(ss$id, key, threshold_tracker[ss$id][key]);
}
}
event SumStats::cluster_key_response(uid: string, ssid: string, key: Key, result: Result)
{
#print fmt("%0.6f MANAGER: receiving key data from %s - %s=%s", network_time(), get_event_peer()$descr, key2str(key), result);
# We only want to try and do a value merge if there are actually measured datapoints
# in the Result.
if ( uid in key_requests )
key_requests[uid] = compose_results(key_requests[uid], result);
else
key_requests[uid] = result;
# Mark that a worker is done.
++done_with[uid];
#print fmt("worker_count:%d :: done_with:%d", Cluster::worker_count, done_with[uid]);
if ( Cluster::worker_count == done_with[uid] )
{
local ss = stats_store[ssid];
local ir = key_requests[uid];
if ( check_thresholds(ss, key, ir, 1.0) )
{
threshold_crossed(ss, key, ir);
event SumStats::cluster_threshold_crossed(ss$id, key, threshold_tracker[ss$id][key]);
}
delete done_with[uid];
delete key_requests[uid];
# Check that there is an outstanding view before subtracting.
if ( outstanding_global_views[ssid] > 0 )
--outstanding_global_views[ssid];
}
}
# Managers handle intermediate updates here.
event SumStats::cluster_key_intermediate_response(ssid: string, key: Key)
{
#print fmt("MANAGER: receiving intermediate key data from %s", get_event_peer()$descr);
#print fmt("MANAGER: requesting key data for %s", key2str(key));
if ( ssid in outstanding_global_views &&
|outstanding_global_views[ssid]| > max_outstanding_global_views )
{
# Don't do this intermediate update. Perhaps at some point in the future
# we will queue and randomly select from these ignored intermediate
# update requests.
return;
}
++outstanding_global_views[ssid];
local uid = unique_id("");
event SumStats::cluster_key_request(uid, ssid, key);
}
event SumStats::cluster_ss_response(uid: string, ssid: string, data: ResultTable, done: bool)
{
#print fmt("MANAGER: receiving results from %s", get_event_peer()$descr);
# Mark another worker as being "done" for this uid.
if ( done )
++done_with[uid];
local local_data = stats_results[uid];
local ss = stats_store[ssid];
for ( key in data )
{
if ( key in local_data )
local_data[key] = compose_results(local_data[key], data[key]);
else
local_data[key] = data[key];
# If a stat is done being collected, thresholds for each key
# need to be checked so we're doing it here to avoid doubly
# iterating over each key.
if ( Cluster::worker_count == done_with[uid] )
{
if ( check_thresholds(ss, key, local_data[key], 1.0) )
{
threshold_crossed(ss, key, local_data[key]);
event SumStats::cluster_threshold_crossed(ss$id, key, threshold_tracker[ss$id][key]);
}
}
}
# If the data has been collected from all peers, we are done and ready to finish.
if ( Cluster::worker_count == done_with[uid] )
{
if ( ss?$epoch_finished )
ss$epoch_finished(local_data);
# Clean up
delete stats_results[uid];
delete done_with[uid];
# Not sure I need to reset the sumstat on the manager.
reset(ss);
}
}
event remote_connection_handshake_done(p: event_peer) &priority=5
{
send_id(p, "SumStats::stats_store");
send_id(p, "SumStats::reducer_store");
}
@endif

@@ -0,0 +1,436 @@
##! The summary statistics framework provides a way to
##! summarize large streams of data into simple reduced
##! measurements.
module SumStats;
export {
## The various calculations are all defined as plugins.
type Calculation: enum {
PLACEHOLDER
};
## Represents a thing which is having summarization
## results collected for it.
type Key: record {
## A non-address related summarization or a sub-key for
## an address based summarization. An example might be
## successful SSH connections by client IP address
## where the client string would be the key value.
## Another example might be number of HTTP requests to
## a particular value in a Host header. This is an
## example of a non-host based metric since multiple
## IP addresses could respond for the same Host
## header value.
str: string &optional;
## Host is the value to which this metric applies.
host: addr &optional;
};
## Represents data being added for a single observation.
## Only supply a single field at a time!
type Observation: record {
## Count value.
num: count &optional;
## Double value.
dbl: double &optional;
## String value.
str: string &optional;
};
type Reducer: record {
## Observation stream identifier for the reducer
## to attach to.
stream: string;
## The calculations to perform on the data points.
apply: set[Calculation];
## A predicate so that you can decide per key if you
## would like to accept the data being inserted.
pred: function(key: SumStats::Key, obs: SumStats::Observation): bool &optional;
## A function to normalize the key. This can be used to aggregate or
## normalize the entire key.
normalize_key: function(key: SumStats::Key): Key &optional;
};
## Value calculated for an observation stream fed into a reducer.
## Most of the fields are added by plugins.
type ResultVal: record {
## The time when the first observation was added to
## this result value.
begin: time;
## The time when the last observation was added to
## this result value.
end: time;
## The number of observations received.
num: count &default=0;
};
## Type to store results for multiple reducers.
type Result: table[string] of ResultVal;
## Type to store a table of sumstats results indexed
## by keys.
type ResultTable: table[Key] of Result;
## SumStats represent an aggregation of reducers along with
## mechanisms to handle various situations like the epoch ending
## or thresholds being crossed.
##
## It's best to not access any global state outside
## of the variables given to the callbacks because there
## is no assurance provided as to where the callbacks
## will be executed on clusters.
type SumStat: record {
## The interval at which this sumstat should be "broken"
## and the '$epoch_finished' callback called. The
## results are also reset at this time so any threshold
## based detection needs to be set to a
## value that should be expected to happen within
## this epoch.
epoch: interval;
## The reducers for the SumStat
reducers: set[Reducer];
## Provide a function to calculate a value from the
## :bro:see:`Result` structure which will be used
## for thresholding.
## This is required if a $threshold value is given.
threshold_val: function(key: SumStats::Key, result: SumStats::Result): count &optional;
## The threshold value for calling the
## $threshold_crossed callback.
threshold: count &optional;
## A series of thresholds for calling the
## $threshold_crossed callback.
threshold_series: vector of count &optional;
## A callback that is called when a threshold is crossed.
threshold_crossed: function(key: SumStats::Key, result: SumStats::Result) &optional;
## A callback with the full collection of Results for
## this SumStat.
epoch_finished: function(rt: SumStats::ResultTable) &optional;
};
## Create a summary statistic.
global create: function(ss: SumStats::SumStat);
## Add data into an observation stream. This should be
## called when a script has measured some point value.
##
## id: The observation stream identifier that the data
## point represents.
##
## key: The key that the value is related to.
##
## obs: The data point to send into the stream.
global observe: function(id: string, key: SumStats::Key, obs: SumStats::Observation);
## This record is primarily used for internal threshold tracking.
type Thresholding: record {
# Internal use only. Indicates if a simple threshold was already crossed.
is_threshold_crossed: bool &default=F;
# Internal use only. Current key for threshold series.
threshold_series_index: count &default=0;
};
## This event is generated when thresholds are reset for a SumStat.
##
## ssid: SumStats ID that thresholds were reset for.
global thresholds_reset: event(ssid: string);
## Helper function to represent a :bro:type:`SumStats::Key` value as
## a simple string.
##
## key: The metric key that is to be converted into a string.
##
## Returns: A string representation of the metric key.
global key2str: function(key: SumStats::Key): string;
}
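
Pulling the exported pieces together, a hedged end-to-end sketch: the stream name, threshold, and choice of event are invented, SumStats::SUM is assumed to come from the sum plugin added elsewhere in this commit, and thresholding uses only the $num field defined in ResultVal above:

# Illustrative sketch only; names and values here are hypothetical.
event bro_init()
    {
    local r1: SumStats::Reducer = [$stream="conn.attempt",
                                   $apply=set(SumStats::SUM)];
    SumStats::create([$epoch=5mins,
                      $reducers=set(r1),
                      $threshold=100,
                      $threshold_val(key: SumStats::Key, result: SumStats::Result) =
                          {
                          # Threshold on the number of observations for this key.
                          return result["conn.attempt"]$num;
                          },
                      $threshold_crossed(key: SumStats::Key, result: SumStats::Result) =
                          {
                          print fmt("%s made %d connection attempts", key$host,
                                    result["conn.attempt"]$num);
                          }]);
    }

event connection_attempt(c: connection)
    {
    SumStats::observe("conn.attempt", [$host=c$id$orig_h], [$num=1]);
    }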
redef record Reducer += {
# Internal use only. Provides a reference back to the related SumStat by its ID.
sid: string &optional;
};
# Internal use only. For tracking thresholds per sumstat and key.
global threshold_tracker: table[string] of table[Key] of Thresholding = table();
redef record SumStat += {
# Internal use only (mostly for cluster coherency).
id: string &optional;
};
# Store of sumstats indexed on the sumstat id.
global stats_store: table[string] of SumStat = table();
# Store of reducers indexed on the data point stream id.
global reducer_store: table[string] of set[Reducer] = table();
# Store of results indexed on the measurement id.
global result_store: table[string] of ResultTable = table();
# Store of threshold information.
global thresholds_store: table[string, Key] of bool = table();
# This is called whenever key values are updated and the new result is given
# as the `result` argument. It's only prototyped here because cluster and non-cluster have
# separate implementations.
global data_added: function(ss: SumStat, key: Key, result: Result);
# Prototype the hook point for plugins to do calculations.
global observe_hook: hook(r: Reducer, val: double, data: Observation, rv: ResultVal);
# Prototype the hook point for plugins to initialize any result values.
global init_resultval_hook: hook(r: Reducer, rv: ResultVal);
# Prototype the hook point for plugins to merge Results.
global compose_resultvals_hook: hook(result: ResultVal, rv1: ResultVal, rv2: ResultVal);
# Event that is used to "finish" measurements and adapt the measurement
# framework for clustered or non-clustered usage.
global finish_epoch: event(ss: SumStat);
function key2str(key: Key): string
{
local out = "";
if ( key?$host )
out = fmt("%shost=%s", out, key$host);
if ( key?$str )
out = fmt("%s%sstr=%s", out, |out|==0 ? "" : ", ", key$str);
return fmt("sumstats_key(%s)", out);
}
function init_resultval(r: Reducer): ResultVal
{
local rv: ResultVal = [$begin=network_time(), $end=network_time()];
hook init_resultval_hook(r, rv);
return rv;
}
function compose_resultvals(rv1: ResultVal, rv2: ResultVal): ResultVal
{
local result: ResultVal;
result$begin = (rv1$begin < rv2$begin) ? rv1$begin : rv2$begin;
result$end = (rv1$end > rv2$end) ? rv1$end : rv2$end;
result$num = rv1$num + rv2$num;
# Run the plugin composition hooks.
hook compose_resultvals_hook(result, rv1, rv2);
return result;
}
function compose_results(r1: Result, r2: Result): Result
{
local result: Result = table();
if ( |r1| > |r2| )
{
for ( data_id in r1 )
{
if ( data_id in r2 )
result[data_id] = compose_resultvals(r1[data_id], r2[data_id]);
else
result[data_id] = r1[data_id];
}
}
else
{
for ( data_id in r2 )
{
if ( data_id in r1 )
result[data_id] = compose_resultvals(r1[data_id], r2[data_id]);
else
result[data_id] = r2[data_id];
}
}
return result;
}
function reset(ss: SumStat)
{
if ( ss$id in result_store )
delete result_store[ss$id];
result_store[ss$id] = table();
if ( ss?$threshold || ss?$threshold_series )
{
threshold_tracker[ss$id] = table();
event SumStats::thresholds_reset(ss$id);
}
}
function create(ss: SumStat)
{
if ( (ss?$threshold || ss?$threshold_series) && ! ss?$threshold_val )
{
Reporter::error("SumStats given a threshold with no $threshold_val function");
}
if ( ! ss?$id )
ss$id=unique_id("");
threshold_tracker[ss$id] = table();
stats_store[ss$id] = ss;
for ( reducer in ss$reducers )
{
reducer$sid = ss$id;
if ( reducer$stream !in reducer_store )
reducer_store[reducer$stream] = set();
add reducer_store[reducer$stream][reducer];
}
reset(ss);
schedule ss$epoch { SumStats::finish_epoch(ss) };
}
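# A minimal usage sketch (the stream name "test.metric" and the 5min epoch
# are illustrative; assumes the SUM plugin is loaded):
#
# local r: Reducer = [$stream="test.metric", $apply=set(SUM)];
# create([$epoch=5mins,
#         $reducers=set(r),
#         $epoch_finished(rt: ResultTable) =
#             {
#             for ( key in rt )
#                 print fmt("%s sum=%.0f", key2str(key), rt[key]["test.metric"]$sum);
#             }]);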
function observe(id: string, key: Key, obs: Observation)
{
if ( id !in reducer_store )
return;
# Try to add the data to all of the defined reducers.
for ( r in reducer_store[id] )
{
if ( r?$normalize_key )
key = r$normalize_key(copy(key));
# If this reducer has a predicate, run the predicate
# and skip this key if the predicate return false.
if ( r?$pred && ! r$pred(key, obs) )
next;
local ss = stats_store[r$sid];
# If there is a threshold and no epoch_finished callback,
# we don't need to continue counting since the data will
# never be accessed. This was leading to some state
# management issues when measuring uniqueness.
# NOTE: this optimization may need to be removed in the
# future if on-demand access to the SumStats results is
# provided.
if ( ! ss?$epoch_finished &&
r$sid in threshold_tracker &&
key in threshold_tracker[r$sid] &&
( ( ss?$threshold &&
threshold_tracker[r$sid][key]$is_threshold_crossed ) ||
( ss?$threshold_series &&
threshold_tracker[r$sid][key]$threshold_series_index+1 == |ss$threshold_series| ) ) )
next;
if ( r$sid !in result_store )
result_store[r$sid] = table();
local results = result_store[r$sid];
if ( key !in results )
results[key] = table();
local result = results[key];
if ( id !in result )
result[id] = init_resultval(r);
local result_val = result[id];
++result_val$num;
# Continually update the $end field.
result_val$end=network_time();
# If a string was given, fall back to 1.0 as the value.
local val = 1.0;
if ( obs?$num || obs?$dbl )
val = obs?$dbl ? obs$dbl : obs$num;
hook observe_hook(r, val, obs, result_val);
data_added(ss, key, result);
}
}
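# Feeding the sumstat above (illustrative values): each call below adds one
# observation for the originating host on the hypothetical "test.metric" stream.
#
# observe("test.metric", [$host=1.2.3.4], [$num=512]);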
# This function checks if a threshold has been crossed. It is also used as a method to implement
# mid-break-interval threshold crossing detection for cluster deployments.
function check_thresholds(ss: SumStat, key: Key, result: Result, modify_pct: double): bool
{
if ( ! (ss?$threshold || ss?$threshold_series) )
return F;
# Add in the extra ResultVals to make threshold_vals easier to write.
if ( |ss$reducers| != |result| )
{
for ( reducer in ss$reducers )
{
if ( reducer$stream !in result )
result[reducer$stream] = init_resultval(reducer);
}
}
local watch = ss$threshold_val(key, result);
if ( modify_pct < 1.0 && modify_pct > 0.0 )
watch = double_to_count(floor(watch/modify_pct));
if ( ss$id !in threshold_tracker )
threshold_tracker[ss$id] = table();
local t_tracker = threshold_tracker[ss$id];
if ( key !in t_tracker )
{
local ttmp: Thresholding;
t_tracker[key] = ttmp;
}
local tt = t_tracker[key];
if ( ss?$threshold && ! tt$is_threshold_crossed && watch >= ss$threshold )
{
# Value crossed the threshold.
return T;
}
if ( ss?$threshold_series &&
|ss$threshold_series| > tt$threshold_series_index &&
watch >= ss$threshold_series[tt$threshold_series_index] )
{
# A threshold series was given and the value crossed the next
# value in the series.
return T;
}
return F;
}
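# Note on modify_pct: a value between 0 and 1 scales the measured value up
# (watch/modify_pct) before the comparison, so a cluster node that has seen
# only a fraction of the data can trigger an intermediate check before the
# full threshold is reached globally.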
function threshold_crossed(ss: SumStat, key: Key, result: Result)
{
# If there is no callback, there is no point in any of this.
if ( ! ss?$threshold_crossed )
return;
# Add in the extra ResultVals to make threshold_crossed callbacks easier to write.
if ( |ss$reducers| != |result| )
{
for ( reducer in ss$reducers )
{
if ( reducer$stream !in result )
result[reducer$stream] = init_resultval(reducer);
}
}
ss$threshold_crossed(key, result);
local tt = threshold_tracker[ss$id][key];
tt$is_threshold_crossed = T;
# Bump up to the next threshold series index if a threshold series is being used.
if ( ss?$threshold_series )
++tt$threshold_series_index;
}


@ -0,0 +1,24 @@
@load ./main
module SumStats;
event SumStats::finish_epoch(ss: SumStat)
{
if ( ss$id in result_store )
{
local data = result_store[ss$id];
if ( ss?$epoch_finished )
ss$epoch_finished(data);
reset(ss);
}
schedule ss$epoch { SumStats::finish_epoch(ss) };
}
function data_added(ss: SumStat, key: Key, result: Result)
{
if ( check_thresholds(ss, key, result, 1.0) )
threshold_crossed(ss, key, result);
}


@ -0,0 +1,8 @@
@load ./average
@load ./max
@load ./min
@load ./sample
@load ./std-dev
@load ./sum
@load ./unique
@load ./variance


@ -0,0 +1,36 @@
@load base/frameworks/sumstats/main
module SumStats;
export {
redef enum Calculation += {
## Calculate the average of the values.
AVERAGE
};
redef record ResultVal += {
## For numeric data, this calculates the average of all values.
average: double &optional;
};
}
hook observe_hook(r: Reducer, val: double, obs: Observation, rv: ResultVal)
{
if ( AVERAGE in r$apply )
{
if ( ! rv?$average )
rv$average = val;
else
rv$average += (val - rv$average) / rv$num;
}
}
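# Worked example (illustrative): observing 2.0, 4.0, 6.0 in order yields
# average = 2.0, then 2.0 + (4.0-2.0)/2 = 3.0, then 3.0 + (6.0-3.0)/3 = 4.0;
# the incremental update tracks the true mean without storing each value.
# (The framework increments rv$num before this hook runs.)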
hook compose_resultvals_hook(result: ResultVal, rv1: ResultVal, rv2: ResultVal)
{
if ( rv1?$average && rv2?$average )
result$average = ((rv1$average*rv1$num) + (rv2$average*rv2$num))/(rv1$num+rv2$num);
else if ( rv1?$average )
result$average = rv1$average;
else if ( rv2?$average )
result$average = rv2$average;
}
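# Merge check (illustrative): an average of 2.0 over 3 values combined with
# an average of 6.0 over 1 value gives (2.0*3 + 6.0*1)/(3+1) = 3.0, the
# weighted mean of the two partial results.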


@ -0,0 +1,38 @@
@load base/frameworks/sumstats/main
module SumStats;
export {
redef enum Calculation += {
## Find the maximum value.
MAX
};
redef record ResultVal += {
## For numeric data, this tracks the maximum value given.
max: double &optional;
};
}
hook observe_hook(r: Reducer, val: double, obs: Observation, rv: ResultVal)
{
if ( MAX in r$apply )
{
if ( ! rv?$max )
rv$max = val;
else if ( val > rv$max )
rv$max = val;
}
}
hook compose_resultvals_hook(result: ResultVal, rv1: ResultVal, rv2: ResultVal)
{
if ( rv1?$max && rv2?$max )
result$max = (rv1$max > rv2$max) ? rv1$max : rv2$max;
else if ( rv1?$max )
result$max = rv1$max;
else if ( rv2?$max )
result$max = rv2$max;
}


@ -0,0 +1,36 @@
@load base/frameworks/sumstats/main
module SumStats;
export {
redef enum Calculation += {
## Find the minimum value.
MIN
};
redef record ResultVal += {
## For numeric data, this tracks the minimum value given.
min: double &optional;
};
}
hook observe_hook(r: Reducer, val: double, obs: Observation, rv: ResultVal)
{
if ( MIN in r$apply )
{
if ( ! rv?$min )
rv$min = val;
else if ( val < rv$min )
rv$min = val;
}
}
hook compose_resultvals_hook(result: ResultVal, rv1: ResultVal, rv2: ResultVal)
{
if ( rv1?$min && rv2?$min )
result$min = (rv1$min < rv2$min) ? rv1$min : rv2$min;
else if ( rv1?$min )
result$min = rv1$min;
else if ( rv2?$min )
result$min = rv2$min;
}


@ -0,0 +1,49 @@
@load base/frameworks/sumstats/main
@load base/utils/queue
module SumStats;
export {
redef record Reducer += {
## A number of sample Observations to collect.
samples: count &default=0;
};
redef record ResultVal += {
## This is the queue where samples are maintained. Use the
## :bro:see:`SumStats::get_samples` function to get a vector of the samples.
samples: Queue::Queue &optional;
};
## Get a vector of sample Observation values from a ResultVal.
global get_samples: function(rv: ResultVal): vector of Observation;
}
function get_samples(rv: ResultVal): vector of Observation
{
local s: vector of Observation = vector();
if ( rv?$samples )
Queue::get_vector(rv$samples, s);
return s;
}
hook observe_hook(r: Reducer, val: double, obs: Observation, rv: ResultVal)
{
if ( r$samples > 0 )
{
if ( ! rv?$samples )
rv$samples = Queue::init([$max_len=r$samples]);
Queue::put(rv$samples, obs);
}
}
hook compose_resultvals_hook(result: ResultVal, rv1: ResultVal, rv2: ResultVal)
{
# Merge $samples
if ( rv1?$samples && rv2?$samples )
result$samples = Queue::merge(rv1$samples, rv2$samples);
else if ( rv1?$samples )
result$samples = rv1$samples;
else if ( rv2?$samples )
result$samples = rv2$samples;
}


@ -0,0 +1,34 @@
@load base/frameworks/sumstats/main
@load ./variance
module SumStats;
export {
redef enum Calculation += {
## Find the standard deviation of the values.
STD_DEV
};
redef record ResultVal += {
## For numeric data, this calculates the standard deviation.
std_dev: double &default=0.0;
};
}
function calc_std_dev(rv: ResultVal)
{
if ( rv?$variance )
rv$std_dev = sqrt(rv$variance);
}
# This depends on the variance plugin which uses priority -5
hook observe_hook(r: Reducer, val: double, obs: Observation, rv: ResultVal) &priority=-10
{
if ( STD_DEV in r$apply )
calc_std_dev(rv);
}
hook compose_resultvals_hook(result: ResultVal, rv1: ResultVal, rv2: ResultVal) &priority=-10
{
calc_std_dev(result);
}


@ -0,0 +1,51 @@
@load base/frameworks/sumstats/main
module SumStats;
export {
redef enum Calculation += {
## Sums the values given. For string values,
## this will be the number of strings given.
SUM
};
redef record ResultVal += {
## For numeric data, this tracks the sum of all values.
sum: double &default=0.0;
};
## The type for threshold functions that operate over SumStats results.
type threshold_function: function(key: SumStats::Key, result: SumStats::Result): count;
## Returns a function suitable for a SumStat's $threshold_val field that
## thresholds on the running sum of the given data stream.
global sum_threshold: function(data_id: string): threshold_function;
}
function sum_threshold(data_id: string): threshold_function
{
return function(key: SumStats::Key, result: SumStats::Result): count
{
return double_to_count(result[data_id]$sum);
};
}
hook init_resultval_hook(r: Reducer, rv: ResultVal)
{
if ( SUM in r$apply && ! rv?$sum )
rv$sum = 0;
}
hook observe_hook(r: Reducer, val: double, obs: Observation, rv: ResultVal)
{
if ( SUM in r$apply )
rv$sum += val;
}
hook compose_resultvals_hook(result: ResultVal, rv1: ResultVal, rv2: ResultVal)
{
if ( rv1?$sum || rv2?$sum )
{
result$sum = rv1?$sum ? rv1$sum : 0;
if ( rv2?$sum )
result$sum += rv2$sum;
}
}
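# Usage sketch (hypothetical stream name "test.metric"): sum_threshold()
# builds a $threshold_val function that thresholds directly on the running sum.
#
# local r: SumStats::Reducer = [$stream="test.metric", $apply=set(SumStats::SUM)];
# SumStats::create([$epoch=1min,
#                   $reducers=set(r),
#                   $threshold=100,
#                   $threshold_val=SumStats::sum_threshold("test.metric"),
#                   $threshold_crossed(key: SumStats::Key, result: SumStats::Result) =
#                       { print fmt("sum crossed 100 for %s", SumStats::key2str(key)); }]);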


@ -0,0 +1,53 @@
@load base/frameworks/sumstats/main
module SumStats;
export {
redef enum Calculation += {
## Calculate the number of unique values.
UNIQUE
};
redef record ResultVal += {
## If cardinality is being tracked, the number of unique
## items is tracked here.
unique: count &default=0;
};
}
redef record ResultVal += {
# Internal use only. This is not meant to be publicly available
# because we don't want to trust that we can inspect the values,
# since we will likely move to a probabilistic data structure in the future.
# TODO: in the future this will optionally be a hyperloglog structure.
unique_vals: set[Observation] &optional;
};
hook observe_hook(r: Reducer, val: double, obs: Observation, rv: ResultVal)
{
if ( UNIQUE in r$apply )
{
if ( ! rv?$unique_vals )
rv$unique_vals=set();
add rv$unique_vals[obs];
rv$unique = |rv$unique_vals|;
}
}
hook compose_resultvals_hook(result: ResultVal, rv1: ResultVal, rv2: ResultVal)
{
if ( rv1?$unique_vals || rv2?$unique_vals )
{
if ( rv1?$unique_vals )
result$unique_vals = rv1$unique_vals;
if ( rv2?$unique_vals )
{
if ( ! result?$unique_vals )
result$unique_vals = rv2$unique_vals;
else
for ( val2 in rv2$unique_vals )
add result$unique_vals[val2];
}
result$unique = |result$unique_vals|;
}
}


@ -0,0 +1,69 @@
@load base/frameworks/sumstats/main
@load ./average
module SumStats;
export {
redef enum Calculation += {
## Find the variance of the values.
VARIANCE
};
redef record ResultVal += {
## For numeric data, this calculates the variance.
variance: double &optional;
};
}
redef record ResultVal += {
# Internal use only. Used for incrementally calculating variance.
prev_avg: double &optional;
# Internal use only. For calculating incremental variance.
var_s: double &default=0.0;
};
function calc_variance(rv: ResultVal)
{
rv$variance = (rv$num > 1) ? rv$var_s/(rv$num-1) : 0.0;
}
# Reduced priority since this depends on the average
hook observe_hook(r: Reducer, val: double, obs: Observation, rv: ResultVal) &priority=-5
{
if ( VARIANCE in r$apply )
{
if ( rv$num > 1 )
rv$var_s += ((val - rv$prev_avg) * (val - rv$average));
calc_variance(rv);
rv$prev_avg = rv$average;
}
}
# Reduced priority since this depends on the average
hook compose_resultvals_hook(result: ResultVal, rv1: ResultVal, rv2: ResultVal) &priority=-5
{
if ( rv1?$var_s && rv1?$average &&
rv2?$var_s && rv2?$average )
{
local rv1_avg_sq = (rv1$average - result$average);
rv1_avg_sq = rv1_avg_sq*rv1_avg_sq;
local rv2_avg_sq = (rv2$average - result$average);
rv2_avg_sq = rv2_avg_sq*rv2_avg_sq;
result$var_s = rv1$num*(rv1$var_s/rv1$num + rv1_avg_sq) + rv2$num*(rv2$var_s/rv2$num + rv2_avg_sq);
}
else if ( rv1?$var_s )
result$var_s = rv1$var_s;
else if ( rv2?$var_s )
result$var_s = rv2$var_s;
if ( rv1?$prev_avg && rv2?$prev_avg )
result$prev_avg = ((rv1$prev_avg*rv1$num) + (rv2$prev_avg*rv2$num))/(rv1$num+rv2$num);
else if ( rv1?$prev_avg )
result$prev_avg = rv1$prev_avg;
else if ( rv2?$prev_avg )
result$prev_avg = rv2$prev_avg;
calc_variance(result);
}
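# The merge above applies the standard combined-variance identity: each
# partial sum of squared deviations (var_s) is shifted by its group's squared
# offset from the combined mean before the two parts are added.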


@ -12,8 +12,10 @@
@load base/utils/numbers
@load base/utils/paths
@load base/utils/patterns
@load base/utils/queue
@load base/utils/strings
@load base/utils/thresholds
@load base/utils/time
@load base/utils/urls
# This has some deep interplay between types and BiFs so it's
@ -27,9 +29,9 @@
@load base/frameworks/communication
@load base/frameworks/control
@load base/frameworks/cluster
@load base/frameworks/intel
@load base/frameworks/reporter
@load base/frameworks/sumstats
@load base/frameworks/tunnels
@load base/protocols/conn


@ -70,10 +70,10 @@ export {
data_channel: ExpectedDataChannel &log &optional;
## Current working directory that this session is in. By making
## the default value '.', we can indicate that unless something
## more concrete is discovered that the existing but unknown
## directory is ok to use.
cwd: string &default="/.";
cwd: string &default=".";
## Command that is currently waiting for a response.
cmdarg: CmdArg &optional;
@ -187,7 +187,13 @@ function ftp_message(s: Info)
local arg = s$cmdarg$arg;
if ( s$cmdarg$cmd in file_cmds )
arg = fmt("ftp://%s%s", addr_to_uri(s$id$resp_h), build_path_compressed(s$cwd, arg));
{
local comp_path = build_path_compressed(s$cwd, arg);
if ( comp_path[0] != "/" )
comp_path = cat("/", comp_path);
arg = fmt("ftp://%s%s", addr_to_uri(s$id$resp_h), comp_path);
}
s$ts=s$cmdarg$ts;
s$command=s$cmdarg$cmd;
@ -264,16 +270,13 @@ event ftp_request(c: connection, command: string, arg: string) &priority=5
event ftp_reply(c: connection, code: count, msg: string, cont_resp: bool) &priority=5
{
local id = c$id;
set_ftp_session(c);
c$ftp$cmdarg = get_pending_cmd(c$ftp$pending_commands, code, msg);
c$ftp$reply_code = code;
c$ftp$reply_msg = msg;
# TODO: figure out what to do with continued FTP response (not used much)
if ( cont_resp ) return;
# TODO: do some sort of generic clear text login processing here.
local response_xyz = parse_ftp_reply_code(code);
@ -302,9 +305,9 @@ event ftp_reply(c: connection, code: count, msg: string, cont_resp: bool) &prior
c$ftp$passive=T;
if ( code == 229 && data$h == [::] )
data$h = c$id$resp_h;
add_expected_data_channel(c$ftp, [$passive=T, $orig_h=c$id$orig_h,
$resp_h=data$h, $resp_p=data$p]);
}
else


@ -1,10 +1,11 @@
##! Base SSH analysis script. The heuristic to blindly determine success or
##! failure for SSH connections is implemented here. At this time, it only
##! uses the size of the data being returned from the server to make the
##! heuristic determination about success of the connection.
##! Requires that :bro:id:`use_conn_size_analyzer` is set to T! The heuristic
##! is not attempted if the connection size analyzer isn't enabled.
@load base/protocols/conn
@load base/frameworks/notice
@load base/utils/site
@load base/utils/thresholds
@ -16,12 +17,6 @@ module SSH;
export {
## The SSH protocol logging stream identifier.
redef enum Log::ID += { LOG };
type Info: record {
## Time when the SSH connection began.
@ -30,10 +25,10 @@ export {
uid: string &log;
## The connection's 4-tuple of endpoint addresses/ports.
id: conn_id &log;
## Indicates if the login was heuristically guessed to be "success",
## "failure", or "undetermined".
status: string &log &default="undetermined";
## Direction of the connection. If the client was a local host
## logging into an external host, this would be OUTBOUND. INBOUND
## would be set for the opposite situation.
# TODO: handle local-local and remote-remote better.
@ -43,33 +38,33 @@ export {
## Software string from the server.
server: string &log &optional;
## Amount of data returned from the server. This is currently
## the only measure of the success heuristic and it is logged to
## assist analysts looking at the logs to make their own determination
## about the success on a case-by-case basis.
resp_size: count &log &default=0;
## Indicate if the SSH session is done being watched.
done: bool &default=F;
};
## The size in bytes of data sent by the server at which the SSH
## connection is presumed to be successful.
const authentication_data_size = 4000 &redef;
## If true, we tell the event engine to not look at further data
## packets after the initial SSH handshake. Helps with performance
## (especially with large file transfers) but precludes some
## kinds of analyses.
const skip_processing_after_detection = F &redef;
## Event that is generated when the heuristic thinks that a login
## was successful.
global heuristic_successful_login: event(c: connection);
## Event that is generated when the heuristic thinks that a login
## failed.
global heuristic_failed_login: event(c: connection);
## Event that can be handled to access the :bro:type:`SSH::Info`
## record as it is sent on to the logging framework.
global log_ssh: event(rec: Info);
@ -104,55 +99,61 @@ function set_session(c: connection)
function check_ssh_connection(c: connection, done: bool)
{
# If already done watching this connection, just return.
if ( c$ssh$done )
return;
if ( done )
{
# If this connection is done, then we can look to see if
# this matches the conditions for a failed login. Failed
# logins are only detected at connection state removal.
if ( # Require originators to have sent at least 50 bytes.
c$orig$size > 50 &&
# Responders must be below 4000 bytes.
c$resp$size < 4000 &&
# Responder must have sent fewer than 40 packets.
c$resp$num_pkts < 40 &&
# If there was a content gap we can't reliably do this heuristic.
c?$conn && c$conn$missed_bytes == 0 )# &&
# Only "normal" connections can count.
#c$conn?$conn_state && c$conn$conn_state in valid_states )
{
c$ssh$status = "failure";
event SSH::heuristic_failed_login(c);
}
if ( c$resp$size > authentication_data_size )
{
c$ssh$status = "success";
event SSH::heuristic_successful_login(c);
}
}
else
{
# If this connection is still being tracked, then it's possible
# to watch for it to be a successful connection.
if ( c$resp$size > authentication_data_size )
{
c$ssh$status = "success";
event SSH::heuristic_successful_login(c);
}
else
# This connection must be tracked longer. Let the scheduled
# check happen again.
return;
}
# Set the direction for the log.
c$ssh$direction = Site::is_local_addr(c$id$orig_h) ? OUTBOUND : INBOUND;
# Set the "done" flag to prevent the watching event from rescheduling
# after detection is done.
c$ssh$done=T;
Log::write(SSH::LOG, c$ssh);
if ( skip_processing_after_detection )
{
# Stop watching this connection, we don't care about it anymore.
@ -161,18 +162,6 @@ function check_ssh_connection(c: connection, done: bool)
}
}
event connection_state_remove(c: connection) &priority=-5
{
@ -197,12 +186,12 @@ event ssh_server_version(c: connection, version: string) &priority=5
set_session(c);
c$ssh$server = version;
}
event ssh_client_version(c: connection, version: string) &priority=5
{
set_session(c);
c$ssh$client = version;
# The heuristic detection for SSH relies on the ConnSize analyzer.
# Don't do the heuristics if it's disabled.
if ( use_conn_size_analyzer )


@ -19,7 +19,7 @@ function extract_path(input: string): string
}
## Compresses a given path by removing '..'s and the parent directory it
## references and also removing dual '/'s and extraneous '/./'s.
## dir: a path string, either relative or absolute
## Returns: a compressed version of the input path
function compress_path(dir: string): string
@ -41,7 +41,7 @@ function compress_path(dir: string): string
return compress_path(dir);
}
const multislash_sep = /(\/\.?){2,}/;
parts = split_all(dir, multislash_sep);
for ( i in parts )
if ( i % 2 == 0 )


@ -0,0 +1,143 @@
##! A FIFO queue.
module Queue;
export {
## Settings for initializing the queue.
type Settings: record {
## If a maximum length is set for the queue
## it will maintain itself at that
## maximum length automatically.
max_len: count &optional;
};
## The internal data structure for the queue.
type Queue: record {};
## Initialize a queue record structure.
##
## s: A :bro:type:`Settings` record configuring the queue.
##
## Returns: An opaque queue record.
global init: function(s: Settings): Queue;
## Put a value onto the beginning of a queue.
##
## q: The queue to put the value into.
##
## val: The value to insert into the queue.
global put: function(q: Queue, val: any);
## Get a value from the end of a queue.
##
## q: The queue to get the value from.
##
## Returns: The value gotten from the queue.
global get: function(q: Queue): any;
## Merge two queues together. If any settings are applied
## to the queues, the settings from q1 are used for the new
## merged queue.
##
## q1: The first queue. Settings are taken from here.
##
## q2: The second queue.
##
## Returns: A new queue from merging the other two together.
global merge: function(q1: Queue, q2: Queue): Queue;
## Get the number of items in a queue.
##
## q: The queue.
##
## Returns: The length of the queue.
global len: function(q: Queue): count;
## Get the contents of the queue as a vector.
##
## q: The queue.
##
## ret: A vector containing the
## current contents of q as the type of ret.
global get_vector: function(q: Queue, ret: vector of any);
}
redef record Queue += {
# Indicator for if the queue was appropriately initialized.
initialized: bool &default=F;
# The values are stored here.
vals: table[count] of any &optional;
# Settings for the queue.
settings: Settings &optional;
# The top value in the vals table.
top: count &default=0;
# The bottom value in the vals table.
bottom: count &default=0;
# The number of values currently in the queue.
size: count &default=0;
};
function init(s: Settings): Queue
{
local q: Queue;
q$vals=table();
q$settings = copy(s);
q$initialized=T;
return q;
}
function put(q: Queue, val: any)
{
if ( q$settings?$max_len && len(q) >= q$settings$max_len )
get(q);
q$vals[q$top] = val;
++q$top;
}
function get(q: Queue): any
{
local ret = q$vals[q$bottom];
delete q$vals[q$bottom];
++q$bottom;
return ret;
}
function merge(q1: Queue, q2: Queue): Queue
{
local ret = init(q1$settings);
local i = q1$bottom;
local j = q2$bottom;
# Iterate over the longer queue's table so that no values are
# dropped when the two queues differ in length.
local longer = len(q1) >= len(q2) ? q1 : q2;
for ( ignored_val in longer$vals )
{
if ( i in q1$vals )
put(ret, q1$vals[i]);
if ( j in q2$vals )
put(ret, q2$vals[j]);
++i;
++j;
}
return ret;
}
function len(q: Queue): count
{
return |q$vals|;
}
function get_vector(q: Queue, ret: vector of any)
{
local i = q$bottom;
local j = 0;
# Really dumb hack, this is only to provide
# the iteration for the correct number of
# values in q$vals.
for ( ignored_val in q$vals )
{
if ( i >= q$top )
break;
ret[j] = q$vals[i];
++j; ++i;
}
}
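# A short usage sketch (illustrative values):
#
# local q = Queue::init([$max_len=3]);
# Queue::put(q, "a");
# Queue::put(q, "b");
# Queue::put(q, "c");
# Queue::put(q, "d");      # "a" is evicted to keep the queue at max_len
# print Queue::len(q);     # 3
# print Queue::get(q);     # "b" (FIFO order)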


@ -0,0 +1,9 @@
## Given an interval, returns a string of the form "3m34s", a minimal
## human-readable rendering of the minutes and seconds that the
## interval represents.
function duration_to_mins_secs(dur: interval): string
{
local dur_count = double_to_count(interval_to_double(dur));
return fmt("%dm%ds", dur_count/60, dur_count%60);
}
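# For example (illustrative): duration_to_mins_secs(214secs) returns "3m34s",
# and duration_to_mins_secs(1hr) returns "60m0s" since only minutes and
# seconds are rendered.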


@ -1,25 +0,0 @@
##! An example of using the metrics framework to collect connection metrics
##! aggregated into /24 CIDR ranges.
@load base/frameworks/metrics
@load base/utils/site
redef enum Metrics::ID += {
CONNS_ORIGINATED,
CONNS_RESPONDED
};
event bro_init()
{
Metrics::add_filter(CONNS_ORIGINATED, [$aggregation_mask=24, $break_interval=1mins]);
# Site::local_nets must be defined in order for this to actually do anything.
Metrics::add_filter(CONNS_RESPONDED, [$aggregation_table=Site::local_nets_table, $break_interval=1mins]);
}
event connection_established(c: connection)
{
Metrics::add_data(CONNS_ORIGINATED, [$host=c$id$orig_h], 1);
Metrics::add_data(CONNS_RESPONDED, [$host=c$id$resp_h], 1);
}


@ -1,37 +0,0 @@
##! Provides an example of aggregating and limiting collection down to
##! only local networks. Additionally, the status code for the response from
##! the request is added into the metric.
@load base/frameworks/metrics
@load base/protocols/http
@load base/utils/site
redef enum Metrics::ID += {
## Measures HTTP requests indexed on both the request host and the response
## code from the server.
HTTP_REQUESTS_BY_STATUS_CODE,
## Currently unfinished and not working.
HTTP_REQUESTS_BY_HOST_HEADER,
};
event bro_init()
{
# TODO: these are waiting on a fix with table vals + records before they will work.
#Metrics::add_filter(HTTP_REQUESTS_BY_HOST_HEADER,
# [$pred(index: Metrics::Index) = { return Site::is_local_addr(index$host); },
# $aggregation_mask=24,
# $break_interval=1min]);
# Site::local_nets must be defined in order for this to actually do anything.
Metrics::add_filter(HTTP_REQUESTS_BY_STATUS_CODE, [$aggregation_table=Site::local_nets_table,
$break_interval=1min]);
}
event HTTP::log_http(rec: HTTP::Info)
{
if ( rec?$host )
Metrics::add_data(HTTP_REQUESTS_BY_HOST_HEADER, [$str=rec$host], 1);
if ( rec?$status_code )
Metrics::add_data(HTTP_REQUESTS_BY_STATUS_CODE, [$host=rec$id$orig_h, $str=fmt("%d", rec$status_code)], 1);
}


@ -1,28 +0,0 @@
##! Provides an example of using the metrics framework to collect the number
##! of times a specific server name indicator value is seen in SSL session
##! establishments. Names ending in google.com are being filtered out as an
##! example of the predicate based filtering in metrics filters.
@load base/frameworks/metrics
@load base/protocols/ssl
redef enum Metrics::ID += {
SSL_SERVERNAME,
};
event bro_init()
{
Metrics::add_filter(SSL_SERVERNAME,
[$name="no-google-ssl-servers",
$pred(index: Metrics::Index) = {
return (/google\.com$/ !in index$str);
},
$break_interval=10secs
]);
}
event SSL::log_ssl(rec: SSL::Info)
{
if ( rec?$server_name )
Metrics::add_data(SSL_SERVERNAME, [$str=rec$server_name], 1);
}


@ -43,15 +43,6 @@ export {
global internal_vulnerable_versions: table[string] of set[VulnerableVersionRange] = table();
function decode_vulnerable_version_range(vuln_sw: string): VulnerableVersionRange
{
# Create a max value with a dunce value only because the $max field
@ -115,11 +106,27 @@ event grab_vulnerable_versions(i: count)
}
}
function update_vulnerable_sw()
{
internal_vulnerable_versions = table();
# Copy the const vulnerable versions into the global modifiable one.
for ( sw in vulnerable_versions )
internal_vulnerable_versions[sw] = vulnerable_versions[sw];
event grab_vulnerable_versions(1);
}
event bro_init() &priority=3
{
update_vulnerable_sw();
}
event Control::configuration_update() &priority=3
{
update_vulnerable_sw();
}
event log_software(rec: Info)
{
if ( rec$name !in internal_vulnerable_versions )


@ -0,0 +1,109 @@
@load base/protocols/http
@load base/protocols/ssl
@load base/frameworks/sumstats
module AppStats;
export {
redef enum Log::ID += { LOG };
type Info: record {
## Timestamp when the log line was finished and written.
ts: time &log;
## Time interval that the log line covers.
ts_delta: interval &log;
## The name of the "app", like "facebook" or "netflix".
app: string &log;
## The number of unique local hosts using the app.
uniq_hosts: count &log;
## The number of hits to the app in total.
hits: count &log;
## The total number of bytes received by users of the app.
bytes: count &log;
};
## The frequency of logging the stats collected by this script.
const break_interval = 15mins &redef;
}
redef record connection += {
resp_hostname: string &optional;
};
event bro_init() &priority=3
{
Log::create_stream(AppStats::LOG, [$columns=Info]);
local r1: SumStats::Reducer = [$stream="apps.bytes", $apply=set(SumStats::SUM)];
local r2: SumStats::Reducer = [$stream="apps.hits", $apply=set(SumStats::UNIQUE)];
SumStats::create([$epoch=break_interval,
$reducers=set(r1, r2),
$epoch_finished(data: SumStats::ResultTable) =
{
local l: Info;
l$ts = network_time();
l$ts_delta = break_interval;
for ( key in data )
{
local result = data[key];
l$app = key$str;
l$bytes = double_to_count(floor(result["apps.bytes"]$sum));
l$hits = result["apps.hits"]$num;
l$uniq_hosts = result["apps.hits"]$unique;
Log::write(LOG, l);
}
}]);
}
function add_sumstats(id: conn_id, hostname: string, size: count)
{
if ( /\.youtube\.com$/ in hostname && size > 512*1024 )
{
SumStats::observe("apps.bytes", [$str="youtube"], [$num=size]);
SumStats::observe("apps.hits", [$str="youtube"], [$str=cat(id$orig_h)]);
}
else if ( /(\.facebook\.com|\.fbcdn\.net)$/ in hostname && size > 20 )
{
SumStats::observe("apps.bytes", [$str="facebook"], [$num=size]);
SumStats::observe("apps.hits", [$str="facebook"], [$str=cat(id$orig_h)]);
}
else if ( /\.google\.com$/ in hostname && size > 20 )
{
SumStats::observe("apps.bytes", [$str="google"], [$num=size]);
SumStats::observe("apps.hits", [$str="google"], [$str=cat(id$orig_h)]);
}
else if ( /\.nflximg\.com$/ in hostname && size > 200*1024 )
{
SumStats::observe("apps.bytes", [$str="netflix"], [$num=size]);
SumStats::observe("apps.hits", [$str="netflix"], [$str=cat(id$orig_h)]);
}
else if ( /\.(pandora|p-cdn)\.com$/ in hostname && size > 512*1024 )
{
SumStats::observe("apps.bytes", [$str="pandora"], [$num=size]);
SumStats::observe("apps.hits", [$str="pandora"], [$str=cat(id$orig_h)]);
}
else if ( /\.gmail\.com$/ in hostname && size > 20 )
{
SumStats::observe("apps.bytes", [$str="gmail"], [$num=size]);
SumStats::observe("apps.hits", [$str="gmail"], [$str=cat(id$orig_h)]);
}
}
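# Extending the list (illustrative): additional apps follow the same pattern,
# keyed on a hostname suffix plus a minimum response size, e.g.:
#
# else if ( /\.example\.com$/ in hostname && size > 20 )
#     {
#     SumStats::observe("apps.bytes", [$str="example"], [$num=size]);
#     SumStats::observe("apps.hits", [$str="example"], [$str=cat(id$orig_h)]);
#     }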
event ssl_established(c: connection)
{
if ( c?$ssl && c$ssl?$server_name )
c$resp_hostname = c$ssl$server_name;
}
event connection_finished(c: connection)
{
if ( c?$resp_hostname )
add_sumstats(c$id, c$resp_hostname, c$resp$size);
}
event HTTP::log_http(rec: HTTP::Info)
{
if ( rec?$host )
add_sumstats(rec$id, rec$host, rec$response_body_len);
}


@ -8,7 +8,6 @@
##! for a sequence number that's above a gap).
@load base/frameworks/notice
module CaptureLoss;


@ -0,0 +1 @@
@load ./main


@ -0,0 +1,9 @@
signature traceroute-detector-ipv4 {
header ip[8] < 10
event "match"
}
signature traceroute-detector-ipv6 {
header ip6[7] < 10
event "match"
}


@ -0,0 +1,98 @@
##! This script detects a large number of ICMP Time Exceeded messages heading toward
##! hosts that have sent low TTL packets. It generates a notice when the number of
##! ICMP Time Exceeded messages for a source-destination pair exceeds a
##! threshold.
@load base/frameworks/sumstats
@load base/frameworks/signatures
@load-sigs ./detect-low-ttls.sig
redef Signatures::ignored_ids += /traceroute-detector.*/;
module Traceroute;
export {
redef enum Log::ID += { LOG };
redef enum Notice::Type += {
## Indicates that a host was seen running traceroutes. For more
## detail about specific traceroutes that we run, refer to the
## traceroute.log.
Detected
};
## By default this script requires that any host detected running traceroutes
## first send low TTL packets (TTL < 10) to the traceroute destination host.
## Changing this setting to `F` will relax the detection a bit by
## solely relying on ICMP time-exceeded messages to detect traceroute.
const require_low_ttl_packets = T &redef;
## Defines the threshold for ICMP Time Exceeded messages for a src-dst pair.
## This threshold only comes into play after a host is found to be
## sending low TTL packets.
const icmp_time_exceeded_threshold = 3 &redef;
## Interval at which to watch for the
## :bro:id:`Traceroute::icmp_time_exceeded_threshold` variable to be crossed.
## At the end of each interval the counter is reset.
const icmp_time_exceeded_interval = 3min &redef;
## The log record for the traceroute log.
type Info: record {
## Timestamp
ts: time &log;
## Address initiating the traceroute.
src: addr &log;
## Destination address of the traceroute.
dst: addr &log;
## Protocol used for the traceroute.
proto: string &log;
};
global log_traceroute: event(rec: Traceroute::Info);
}
event bro_init() &priority=5
{
Log::create_stream(Traceroute::LOG, [$columns=Info, $ev=log_traceroute]);
local r1: SumStats::Reducer = [$stream="traceroute.time_exceeded", $apply=set(SumStats::UNIQUE)];
local r2: SumStats::Reducer = [$stream="traceroute.low_ttl_packet", $apply=set(SumStats::SUM)];
SumStats::create([$epoch=icmp_time_exceeded_interval,
$reducers=set(r1, r2),
$threshold_val(key: SumStats::Key, result: SumStats::Result) =
{
# Return a threshold value of zero when low-TTL packets are
# required but none have been seen for this src-dst pair;
# otherwise threshold on the unique time-exceeded count.
if ( require_low_ttl_packets && result["traceroute.low_ttl_packet"]$sum == 0 )
return 0;
else
return result["traceroute.time_exceeded"]$unique;
},
$threshold=icmp_time_exceeded_threshold,
$threshold_crossed(key: SumStats::Key, result: SumStats::Result) =
{
local parts = split_n(key$str, /-/, F, 2);
local src = to_addr(parts[1]);
local dst = to_addr(parts[2]);
local proto = parts[3];
Log::write(LOG, [$ts=network_time(), $src=src, $dst=dst, $proto=proto]);
NOTICE([$note=Traceroute::Detected,
$msg=fmt("%s seems to be running traceroute using %s", src, proto),
$src=src,
$identifier=cat(src,proto)]);
}]);
}
# Low TTL packets are detected with a signature.
event signature_match(state: signature_state, msg: string, data: string)
{
if ( state$sig_id == /traceroute-detector.*/ )
{
SumStats::observe("traceroute.low_ttl_packet", [$str=cat(state$conn$id$orig_h,"-",state$conn$id$resp_h,"-",get_port_transport_proto(state$conn$id$resp_p))], [$num=1]);
}
}
event icmp_time_exceeded(c: connection, icmp: icmp_conn, code: count, context: icmp_context)
{
SumStats::observe("traceroute.time_exceeded", [$str=cat(context$id$orig_h,"-",context$id$resp_h,"-",get_port_transport_proto(context$id$resp_p))], [$str=cat(c$id$orig_h)]);
}
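# Key format note: the observe calls above build keys of the form
# "<orig>-<resp>-<proto>", e.g. "192.168.1.1-8.8.8.8-udp" (illustrative
# addresses), which the threshold_crossed callback splits apart with split_n().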


@ -1,4 +1,5 @@
##! Log the loaded scripts.
@load base/utils/paths
module LoadedScripts;
@ -34,5 +35,5 @@ event bro_init() &priority=5
event bro_script_loaded(path: string, level: count)
{
Log::write(LoadedScripts::LOG, [$name=cat(depth[level], compress_path(path))]);
}


@ -0,0 +1,193 @@
##! TCP Scan detection.
##!
##! .. Authors: Sheharbano Khattak
##!             Seth Hall
##!             All the authors of the old scan.bro
@load base/frameworks/notice
@load base/frameworks/sumstats
@load base/utils/time
module Scan;
export {
redef enum Notice::Type += {
## Address scans detect that a host appears to be scanning some number of
## destinations on a single port. This notice is generated when more than
## :bro:id:`Scan::addr_scan_threshold` unique hosts are seen over the previous
## :bro:id:`Scan::addr_scan_interval` time range.
Address_Scan,
## Port scans detect that an attacking host appears to be scanning a
## single victim host on several ports. This notice is generated when
## an attacking host attempts to connect to :bro:id:`Scan::port_scan_threshold`
## unique ports on a single host over the previous
## :bro:id:`Scan::port_scan_interval` time range.
Port_Scan,
};
## Failed connection attempts are tracked over this time interval for the address
## scan detection. A higher interval will detect slower scanners, but may also
## yield more false positives.
const addr_scan_interval = 5min &redef;
## Failed connection attempts are tracked over this time interval for the port scan
## detection. A higher interval will detect slower scanners, but may also yield
## more false positives.
const port_scan_interval = 5min &redef;
## The threshold of the number of unique hosts a scanning host has to have
## failed connections with on a single port.
const addr_scan_threshold = 25 &redef;
## The threshold of the number of unique ports a scanning host has to have
## failed connections with on a single victim host.
const port_scan_threshold = 15 &redef;
## Custom thresholds based on service for address scan. This is primarily
## useful for setting reduced thresholds for specific ports.
const addr_scan_custom_thresholds: table[port] of count &redef;
## Hook for determining whether a failed connection should count toward
## address scan detection; a handler calling break vetoes the observation.
global Scan::addr_scan_policy: hook(scanner: addr, victim: addr, scanned_port: port);
## Hook for determining whether a failed connection should count toward
## port scan detection; a handler calling break vetoes the observation.
global Scan::port_scan_policy: hook(scanner: addr, victim: addr, scanned_port: port);
}
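# Example (illustrative; 10.0.0.53 is a hypothetical scanner appliance):
# exempt a host from address scan tracking by vetoing the policy hook.
#
# hook Scan::addr_scan_policy(scanner: addr, victim: addr, scanned_port: port)
#     {
#     if ( scanner == 10.0.0.53 )
#         break;
#     }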
event bro_init() &priority=5
{
local r1: SumStats::Reducer = [$stream="scan.addr.fail", $apply=set(SumStats::UNIQUE)];
SumStats::create([$epoch=addr_scan_interval,
$reducers=set(r1),
$threshold_val(key: SumStats::Key, result: SumStats::Result) =
{
return double_to_count(result["scan.addr.fail"]$unique);
},
#$threshold_func=check_addr_scan_threshold,
$threshold=addr_scan_threshold,
$threshold_crossed(key: SumStats::Key, result: SumStats::Result) =
{
local r = result["scan.addr.fail"];
local side = Site::is_local_addr(key$host) ? "local" : "remote";
local dur = duration_to_mins_secs(r$end-r$begin);
local message=fmt("%s scanned at least %d unique hosts on port %s in %s", key$host, r$unique, key$str, dur);
NOTICE([$note=Address_Scan,
$src=key$host,
$p=to_port(key$str),
$sub=side,
$msg=message,
$identifier=cat(key$host)]);
}]);
# Note: port scans are tracked similarly to a table[src_ip, dst_ip] of set(port).
local r2: SumStats::Reducer = [$stream="scan.port.fail", $apply=set(SumStats::UNIQUE)];
SumStats::create([$epoch=port_scan_interval,
$reducers=set(r2),
$threshold_val(key: SumStats::Key, result: SumStats::Result) =
{
return double_to_count(result["scan.port.fail"]$unique);
},
$threshold=port_scan_threshold,
$threshold_crossed(key: SumStats::Key, result: SumStats::Result) =
{
local r = result["scan.port.fail"];
local side = Site::is_local_addr(key$host) ? "local" : "remote";
local dur = duration_to_mins_secs(r$end-r$begin);
local message = fmt("%s scanned at least %d unique ports of host %s in %s", key$host, r$unique, key$str, dur);
NOTICE([$note=Port_Scan,
$src=key$host,
$dst=to_addr(key$str),
$sub=side,
$msg=message,
$identifier=cat(key$host)]);
}]);
}
function add_sumstats(id: conn_id, reverse: bool)
{
local scanner = id$orig_h;
local victim = id$resp_h;
local scanned_port = id$resp_p;
if ( reverse )
{
scanner = id$resp_h;
victim = id$orig_h;
scanned_port = id$orig_p;
}
if ( hook Scan::addr_scan_policy(scanner, victim, scanned_port) )
SumStats::observe("scan.addr.fail", [$host=scanner, $str=cat(scanned_port)], [$str=cat(victim)]);
if ( hook Scan::port_scan_policy(scanner, victim, scanned_port) )
SumStats::observe("scan.port.fail", [$host=scanner, $str=cat(victim)], [$str=cat(scanned_port)]);
}
function is_failed_conn(c: connection): bool
{
# Sr || ( (hR || ShR) && (data not sent in any direction) )
if ( (c$orig$state == TCP_SYN_SENT && c$resp$state == TCP_RESET) ||
(((c$orig$state == TCP_RESET && c$resp$state == TCP_SYN_ACK_SENT) ||
(c$orig$state == TCP_RESET && c$resp$state == TCP_ESTABLISHED && "S" in c$history )
) && /[Dd]/ !in c$history )
)
return T;
return F;
}
function is_reverse_failed_conn(c: connection): bool
{
# reverse scan i.e. conn dest is the scanner
# sR || ( (Hr || sHr) && (data not sent in any direction) )
if ( (c$resp$state == TCP_SYN_SENT && c$orig$state == TCP_RESET) ||
(((c$resp$state == TCP_RESET && c$orig$state == TCP_SYN_ACK_SENT) ||
(c$resp$state == TCP_RESET && c$orig$state == TCP_ESTABLISHED && "s" in c$history )
) && /[Dd]/ !in c$history )
)
return T;
return F;
}
## Generated for an unsuccessful connection attempt. This
## event is raised when an originator unsuccessfully attempted
## to establish a connection. “Unsuccessful” is defined as at least
## tcp_attempt_delay seconds having elapsed since the originator first sent a
## connection establishment packet to the destination without seeing a reply.
event connection_attempt(c: connection)
{
local is_reverse_scan = F;
if ( "H" in c$history )
is_reverse_scan = T;
add_sumstats(c$id, is_reverse_scan);
}
## Generated for a rejected TCP connection. This event is raised when an originator
## attempted to setup a TCP connection but the responder replied with a RST packet
## denying it.
event connection_rejected(c: connection)
{
local is_reverse_scan = F;
if ( "s" in c$history )
is_reverse_scan = T;
add_sumstats(c$id, is_reverse_scan);
}
## Generated when an endpoint aborted a TCP connection. The event is raised when
## one endpoint of an *established* TCP connection aborted by sending a RST packet.
event connection_reset(c: connection)
{
if ( is_failed_conn(c) )
add_sumstats(c$id, F);
else if ( is_reverse_failed_conn(c) )
add_sumstats(c$id, T);
}
## Generated for each still-open connection when Bro terminates.
event connection_pending(c: connection)
{
if ( is_failed_conn(c) )
add_sumstats(c$id, F);
else if ( is_reverse_failed_conn(c) )
add_sumstats(c$id, T);
}


@ -0,0 +1,59 @@
##! FTP brute-forcing detector, triggering when too many rejected usernames or
##! failed passwords have occurred from a single address.
@load base/protocols/ftp
@load base/frameworks/sumstats
@load base/utils/time
module FTP;
export {
redef enum Notice::Type += {
## Indicates a host bruteforcing FTP logins by watching for too many
## rejected usernames or failed passwords.
Bruteforcing
};
## How many rejected usernames or passwords are required before a host
## is considered to be bruteforcing.
const bruteforce_threshold = 20 &redef;
## The time period in which the threshold needs to be crossed before
## being reset.
const bruteforce_measurement_interval = 15mins &redef;
}
event bro_init()
{
local r1: SumStats::Reducer = [$stream="ftp.failed_auth", $apply=set(SumStats::UNIQUE)];
SumStats::create([$epoch=bruteforce_measurement_interval,
$reducers=set(r1),
$threshold_val(key: SumStats::Key, result: SumStats::Result) =
{
return result["ftp.failed_auth"]$num;
},
$threshold=bruteforce_threshold,
$threshold_crossed(key: SumStats::Key, result: SumStats::Result) =
{
local r = result["ftp.failed_auth"];
local dur = duration_to_mins_secs(r$end-r$begin);
local plural = r$unique>1 ? "s" : "";
local message = fmt("%s had %d failed logins on %d FTP server%s in %s", key$host, r$num, r$unique, plural, dur);
NOTICE([$note=FTP::Bruteforcing,
$src=key$host,
$msg=message,
$identifier=cat(key$host)]);
}]);
}
event ftp_reply(c: connection, code: count, msg: string, cont_resp: bool)
{
local cmd = c$ftp$cmdarg$cmd;
if ( cmd == "USER" || cmd == "PASS" )
{
if ( FTP::parse_ftp_reply_code(code)$x == 5 )
SumStats::observe("ftp.failed_auth", [$host=c$id$orig_h], [$str=cat(c$id$resp_h)]);
}
}
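# Tuning sketch (illustrative values): both constants above are &redef, so a
# site can adjust the sensitivity, e.g. in local.bro:
#
# redef FTP::bruteforce_threshold = 10;
# redef FTP::bruteforce_measurement_interval = 30mins;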


@ -1,7 +1,7 @@
##! SQL injection attack detection in HTTP.
@load base/frameworks/notice
@load base/frameworks/sumstats
@load base/protocols/http
module HTTP;
@ -14,36 +14,34 @@ export {
## it. This is tracked by IP address as opposed to hostname.
SQL_Injection_Victim,
};
redef enum Metrics::ID += {
## Metric to track SQL injection attackers.
SQLI_ATTACKER,
## Metrics to track SQL injection victims.
SQLI_VICTIM,
};
redef enum Tags += {
## Indicator of a URI based SQL injection attack.
URI_SQLI,
## Indicator of client body based SQL injection attack. This is
## typically the body content of a POST request. Not implemented yet.
POST_SQLI,
## Indicator of a cookie based SQL injection attack. Not implemented yet.
COOKIE_SQLI,
};
## Defines the threshold that determines if an SQL injection attack
## is ongoing based on the number of requests that appear to be SQL
## injection attacks.
const sqli_requests_threshold = 50 &redef;
## Interval at which to watch for the
## :bro:id:`HTTP::sqli_requests_threshold` variable to be crossed.
## At the end of each interval the counter is reset.
const sqli_requests_interval = 5min &redef;
## Collecting samples will add extra data to notice emails
## by collecting some sample SQL injection url paths. Disable
## sample collection by setting this value to 0.
const collect_SQLi_samples = 5 &redef;
## Regular expression is used to match URI based SQL injections.
const match_sql_injection_uri =
/[\?&][^[:blank:]\x00-\x37\|]+?=[\-[:alnum:]%]+([[:blank:]\x00-\x37]|\/\*.*?\*\/)*['"]?([[:blank:]\x00-\x37]|\/\*.*?\*\/|\)?;)+.*?([hH][aA][vV][iI][nN][gG]|[uU][nN][iI][oO][nN]|[eE][xX][eE][cC]|[sS][eE][lL][eE][cC][tT]|[dD][eE][lL][eE][tT][eE]|[dD][rR][oO][pP]|[dD][eE][cC][lL][aA][rR][eE]|[cC][rR][eE][aA][tT][eE]|[iI][nN][sS][eE][rR][tT])([[:blank:]\x00-\x37]|\/\*.*?\*\/)+/
| /[\?&][^[:blank:]\x00-\x37\|]+?=[\-0-9%]+([[:blank:]\x00-\x37]|\/\*.*?\*\/)*['"]?([[:blank:]\x00-\x37]|\/\*.*?\*\/|\)?;)+([xX]?[oO][rR]|[nN]?[aA][nN][dD])([[:blank:]\x00-\x37]|\/\*.*?\*\/)+['"]?(([^a-zA-Z&]+)?=|[eE][xX][iI][sS][tT][sS])/
| /[\?&][^[:blank:]\x00-\x37]+?=[\-0-9%]*([[:blank:]\x00-\x37]|\/\*.*?\*\/)*['"]([[:blank:]\x00-\x37]|\/\*.*?\*\/)*(-|=|\+|\|\|)([[:blank:]\x00-\x37]|\/\*.*?\*\/)*([0-9]|\(?[cC][oO][nN][vV][eE][rR][tT]|[cC][aA][sS][tT])/
@ -52,20 +50,54 @@ export {
| /\/\*![[:digit:]]{5}.*?\*\// &redef;
}
function format_sqli_samples(samples: vector of SumStats::Observation): string
{
local ret = "SQL Injection samples\n---------------------";
for ( i in samples )
ret += "\n" + samples[i]$str;
return ret;
}
event bro_init() &priority=3
{
# Set up the sumstats so that the framework knows how to
# determine when it looks like an actual attack and how to respond when
# thresholds are crossed.
local r1: SumStats::Reducer = [$stream="http.sqli.attacker", $apply=set(SumStats::SUM), $samples=collect_SQLi_samples];
SumStats::create([$epoch=sqli_requests_interval,
$reducers=set(r1),
$threshold_val(key: SumStats::Key, result: SumStats::Result) =
{
return double_to_count(result["http.sqli.attacker"]$sum);
},
$threshold=sqli_requests_threshold,
$threshold_crossed(key: SumStats::Key, result: SumStats::Result) =
{
local r = result["http.sqli.attacker"];
NOTICE([$note=SQL_Injection_Attacker,
$msg="An SQL injection attacker was discovered!",
$email_body_sections=vector(format_sqli_samples(SumStats::get_samples(r))),
$src=key$host,
$identifier=cat(key$host)]);
}]);
local r2: SumStats::Reducer = [$stream="http.sqli.victim", $apply=set(SumStats::SUM), $samples=collect_SQLi_samples];
SumStats::create([$epoch=sqli_requests_interval,
$reducers=set(r2),
$threshold_val(key: SumStats::Key, result: SumStats::Result) =
{
return double_to_count(result["http.sqli.victim"]$sum);
},
$threshold=sqli_requests_threshold,
$threshold_crossed(key: SumStats::Key, result: SumStats::Result) =
{
local r = result["http.sqli.victim"];
NOTICE([$note=SQL_Injection_Victim,
$msg="An SQL injection victim was discovered!",
$email_body_sections=vector(format_sqli_samples(SumStats::get_samples(r))),
$src=key$host,
$identifier=cat(key$host)]);
}]);
}
event http_request(c: connection, method: string, original_URI: string,
@ -74,8 +106,8 @@ event http_request(c: connection, method: string, original_URI: string,
if ( match_sql_injection_uri in unescaped_URI )
{
add c$http$tags[URI_SQLI];
SumStats::observe("http.sqli.attacker", [$host=c$id$orig_h], [$str=original_URI]);
SumStats::observe("http.sqli.victim", [$host=c$id$resp_h], [$str=original_URI]);
}
}


@ -2,7 +2,7 @@
##! bruteforcing over SSH.
@load base/protocols/ssh
@load base/frameworks/metrics
@load base/frameworks/sumstats
@load base/frameworks/notice
@load base/frameworks/intel
@ -10,7 +10,7 @@ module SSH;
export {
redef enum Notice::Type += {
## Indicates that a host has been identified as crossing the
## :bro:id:`SSH::password_guesses_limit` threshold with heuristically
## determined failed logins.
Password_Guessing,
@ -19,10 +19,10 @@ export {
## currently implemented.
Login_By_Password_Guesser,
};
redef enum Intel::Where += {
## An indicator of the login for the intel framework.
SSH::SUCCESSFUL_LOGIN,
};
## The number of failed SSH connections before a host is designated as
@ -33,47 +33,54 @@ export {
## model of a password guesser.
const guessing_timeout = 30 mins &redef;
## This value can be used to exclude hosts or entire networks from being
## tracked as potential "guessers". There are cases where the success
## heuristic fails and this acts as the whitelist. The index represents
## client subnets and the yield value represents server subnets.
const ignore_guessers: table[subnet] of subnet &redef;
## Tracks hosts identified as guessing passwords.
global password_guessers: set[addr]
&read_expire=guessing_timeout+1hr &synchronized &redef;
}
event bro_init()
{
local r1: SumStats::Reducer = [$stream="ssh.login.failure", $apply=set(SumStats::SUM)];
SumStats::create([$epoch=guessing_timeout,
$reducers=set(r1),
$threshold_val(key: SumStats::Key, result: SumStats::Result) =
{
return double_to_count(result["ssh.login.failure"]$sum);
},
$threshold=password_guesses_limit,
$threshold_crossed(key: SumStats::Key, result: SumStats::Result) =
{
local r = result["ssh.login.failure"];
# Generate the notice.
NOTICE([$note=Password_Guessing,
$msg=fmt("%s appears to be guessing SSH passwords (seen in %d connections).", key$host, r$num),
$src=key$host,
$identifier=cat(key$host)]);
# Insert the guesser into the intel framework.
Intel::insert([$host=key$host,
$meta=[$source="local",
$desc=fmt("Bro observed %d apparently failed SSH connections.", r$num)]]);
}]);
}
event SSH::heuristic_successful_login(c: connection)
{
local id = c$id;
Intel::seen([$host=id$orig_h,
$conn=c,
$where=SSH::SUCCESSFUL_LOGIN]);
}
event SSH::heuristic_failed_login(c: connection)
{
local id = c$id;
# Add data to the ssh.login.failure sumstat unless this connection should
# be ignored.
if ( ! (id$orig_h in ignore_guessers &&
id$resp_h in ignore_guessers[id$orig_h]) )
SumStats::observe("ssh.login.failure", [$host=id$orig_h], [$num=1]);
}


@ -8,6 +8,9 @@
# Apply the default tuning scripts for common tuning settings.
@load tuning/defaults
# Load the scan detection script.
@load misc/scan
# Generate notices when vulnerable versions of software are discovered.
# The default is to only monitor software found in the address space defined
# as "local". Refer to the software framework's documentation for more


@ -24,9 +24,6 @@
@load frameworks/intel/smtp.bro
@load frameworks/intel/ssl.bro
@load frameworks/intel/where-locations.bro
@load frameworks/software/version-changes.bro
@load frameworks/software/vulnerable.bro
@load integration/barnyard2/__load__.bro
@ -35,9 +32,13 @@
@load integration/collective-intel/__load__.bro
@load integration/collective-intel/main.bro
@load misc/analysis-groups.bro
@load misc/app-metrics.bro
@load misc/capture-loss.bro
@load misc/detect-traceroute/__load__.bro
@load misc/detect-traceroute/main.bro
@load misc/loaded-scripts.bro
@load misc/profiling.bro
@load misc/scan.bro
@load misc/stats.bro
@load misc/trim-trace-file.bro
@load protocols/conn/known-hosts.bro
@ -45,6 +46,7 @@
@load protocols/conn/weirds.bro
@load protocols/dns/auth-addl.bro
@load protocols/dns/detect-external-names.bro
@load protocols/ftp/detect-bruteforcing.bro
@load protocols/ftp/detect.bro
@load protocols/ftp/software.bro
@load protocols/http/detect-MHR.bro


@ -77,7 +77,7 @@ char* CompositeHash::SingleValHash(int type_check, char* kp0,
*kp = ( v ? 1 : 0);
kp0 = reinterpret_cast<char*>(kp+1);
if ( ! v )
return kp0;
}
@ -181,16 +181,24 @@ char* CompositeHash::SingleValHash(int type_check, char* kp0,
Val* key = lv->Index(i);
if ( ! (kp1 = SingleValHash(type_check, kp1, key->Type(), key,
false)) )
{
Unref(lv);
return 0;
}
if ( ! v->Type()->IsSet() )
{
Val* val = tv->Lookup(key);
if ( ! (kp1 = SingleValHash(type_check, kp1, val->Type(),
val, false)) )
{
Unref(lv);
return 0;
}
}
}
Unref(lv);
}
break;
@ -454,16 +462,27 @@ int CompositeHash::SingleTypeKeySize(BroType* bt, const Val* v,
Val* key = lv->Index(i);
sz = SingleTypeKeySize(key->Type(), key, type_check, sz, false,
calc_static_size);
if ( ! sz )
{
Unref(lv);
return 0;
}
if ( ! bt->IsSet() )
{
Val* val = tv->Lookup(key);
sz = SingleTypeKeySize(val->Type(), val, type_check, sz,
false, calc_static_size);
if ( ! sz )
{
Unref(lv);
return 0;
}
}
}
Unref(lv);
break;
}
@ -830,7 +849,10 @@ const char* CompositeHash::RecoverOneVal(const HashKey* k, const char* kp0,
}
for ( int i = 0; i < n; ++i )
{
tv->Assign(keys[i], t->IsSet() ? 0 : values[i]);
Unref(keys[i]);
}
pval = tv;
}

View file

@ -4026,7 +4026,27 @@ Val* RecordCoerceExpr::Fold(Val* v) const
Type()->AsRecordType()->FieldDecl(i)->FindAttr(ATTR_DEFAULT);
if ( def )
val->Assign(i, def->AttrExpr()->Eval(0));
{
Val* def_val = def->AttrExpr()->Eval(0);
BroType* def_type = def_val->Type();
BroType* field_type = Type()->AsRecordType()->FieldType(i);
if ( def_type->Tag() == TYPE_RECORD &&
field_type->Tag() == TYPE_RECORD &&
! same_type(def_type, field_type) )
{
Val* tmp = def_val->AsRecordVal()->CoerceTo(
field_type->AsRecordType());
if ( tmp )
{
Unref(def_val);
def_val = tmp;
}
}
val->Assign(i, def_val);
}
else
val->Assign(i, 0);
}
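At the script level, this coercion means a record field's &default whose constructor only partially matches the field type is now promoted correctly; a small sketch (the Inner/Outer types are hypothetical, mirroring the record-default-coercion btest further down):

type Inner: record {
	a: count;
	b: count &default=2;
};

type Outer: record {
	n: count;
	inner: Inner &default=[$a=1]; # [$a=1] lacks b, so it needs coercion
};

event bro_init()
	{
	local o: Outer = [$n=0];
	print o$inner$b; # prints 2: [$a=1] was coerced to Inner, filling in b
	}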
@ -4297,6 +4317,10 @@ Val* ScheduleExpr::Eval(Frame* f) const
if ( args )
{
TimerMgr* tmgr = mgr.CurrentTimerMgr();
if ( ! tmgr )
tmgr = timer_mgr;
tmgr->Add(new ScheduleTimer(event->Handler(), args, dt, tmgr));
}
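Falling back to the global timer manager is what lets a schedule statement appear outside any event handler; a minimal sketch (the tick event is hypothetical, following the same pattern as the updated events btest below):

event tick(msg: string)
	{
	print fmt("fired: %s", msg);
	}

# Previously this crashed because no per-connection timer manager exists
# at the top level; now the timer is queued on the global one.
schedule 2sec { tick("from global scope") };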

View file

@ -240,6 +240,11 @@ TableType* record_field_table;
StringVal* cmd_line_bpf_filter;
OpaqueType* md5_type;
OpaqueType* sha1_type;
OpaqueType* sha256_type;
OpaqueType* entropy_type;
#include "const.bif.netvar_def"
#include "types.bif.netvar_def"
#include "event.bif.netvar_def"
@ -300,6 +305,11 @@ void init_general_global_var()
cmd_line_bpf_filter =
internal_val("cmd_line_bpf_filter")->AsStringVal();
md5_type = new OpaqueType("md5");
sha1_type = new OpaqueType("sha1");
sha256_type = new OpaqueType("sha256");
entropy_type = new OpaqueType("entropy");
}
void init_net_var()
@ -350,7 +360,7 @@ void init_net_var()
opt_internal_int("tcp_excessive_data_without_further_acks");
x509_type = internal_type("X509")->AsRecordType();
socks_address = internal_type("SOCKS::Address")->AsRecordType();
non_analyzed_lifetime = opt_internal_double("non_analyzed_lifetime");

View file

@ -244,6 +244,12 @@ extern TableType* record_field_table;
extern StringVal* cmd_line_bpf_filter;
class OpaqueType;
extern OpaqueType* md5_type;
extern OpaqueType* sha1_type;
extern OpaqueType* sha256_type;
extern OpaqueType* entropy_type;
// Initializes globals that don't pertain to network/event analysis.
extern void init_general_global_var();

View file

@ -1,4 +1,5 @@
#include "OpaqueVal.h"
#include "NetVar.h"
#include "Reporter.h"
#include "Serializer.h"
@ -72,6 +73,10 @@ bool HashVal::DoUnserialize(UnserialInfo* info)
return UNSERIALIZE(&valid);
}
MD5Val::MD5Val() : HashVal(md5_type)
{
}
void MD5Val::digest(val_list& vlist, u_char result[MD5_DIGEST_LENGTH])
{
MD5_CTX h;
@ -189,6 +194,10 @@ bool MD5Val::DoUnserialize(UnserialInfo* info)
return true;
}
SHA1Val::SHA1Val() : HashVal(sha1_type)
{
}
void SHA1Val::digest(val_list& vlist, u_char result[SHA_DIGEST_LENGTH])
{
SHA_CTX h;
@ -297,6 +306,10 @@ bool SHA1Val::DoUnserialize(UnserialInfo* info)
return true;
}
SHA256Val::SHA256Val() : HashVal(sha256_type)
{
}
void SHA256Val::digest(val_list& vlist, u_char result[SHA256_DIGEST_LENGTH])
{
SHA256_CTX h;
@ -410,6 +423,9 @@ bool SHA256Val::DoUnserialize(UnserialInfo* info)
return true;
}
EntropyVal::EntropyVal() : OpaqueVal(entropy_type)
{
}
bool EntropyVal::Feed(const void* data, size_t size)
{

View file

@ -36,7 +36,7 @@ public:
u_char key[MD5_DIGEST_LENGTH],
u_char result[MD5_DIGEST_LENGTH]);
MD5Val() : HashVal(new OpaqueType("md5")) { Unref(Type()); }
MD5Val();
protected:
friend class Val;
@ -55,7 +55,7 @@ class SHA1Val : public HashVal {
public:
static void digest(val_list& vlist, u_char result[SHA_DIGEST_LENGTH]);
SHA1Val() : HashVal(new OpaqueType("sha1")) { Unref(Type()); }
SHA1Val();
protected:
friend class Val;
@ -74,7 +74,7 @@ class SHA256Val : public HashVal {
public:
static void digest(val_list& vlist, u_char result[SHA256_DIGEST_LENGTH]);
SHA256Val() : HashVal(new OpaqueType("sha256")) { Unref(Type()); }
SHA256Val();
protected:
friend class Val;
@ -91,7 +91,7 @@ private:
class EntropyVal : public OpaqueVal {
public:
EntropyVal() : OpaqueVal(new OpaqueType("entropy")) { }
EntropyVal();
bool Feed(const void* data, size_t size);
bool Get(double *r_ent, double *r_chisq, double *r_mean,

View file

@ -566,7 +566,7 @@ void TCP_Analyzer::UpdateInactiveState(double t,
else
endpoint->SetState(TCP_ENDPOINT_SYN_SENT);
if ( connection_attempt )
if ( tcp_attempt_delay )
ADD_ANALYZER_TIMER(&TCP_Analyzer::AttemptTimer,
t + tcp_attempt_delay, 1,
TIMER_TCP_ATTEMPT);
@ -1497,24 +1497,7 @@ void TCP_Analyzer::ExpireTimer(double t)
if ( resp->state == TCP_ENDPOINT_INACTIVE )
{
if ( (orig->state == TCP_ENDPOINT_SYN_SENT ||
orig->state == TCP_ENDPOINT_SYN_ACK_SENT) )
{
if ( ! connection_attempt )
{
// Time out the connection attempt,
// since the AttemptTimer isn't going
// to do it for us, and we don't want
// to clog the data structures with
// old, failed attempts.
Event(connection_timeout);
is_active = 0;
sessions->Remove(Conn());
return;
}
}
else if ( orig->state == TCP_ENDPOINT_INACTIVE )
if ( orig->state == TCP_ENDPOINT_INACTIVE )
{
// Nothing ever happened on this connection.
// This can occur when we see a trashed

View file

@ -1749,7 +1749,7 @@ Val* TableVal::Default(Val* index)
if ( def_val->Type()->Tag() != TYPE_FUNC ||
same_type(def_val->Type(), Type()->YieldType()) )
return def_val->Ref();
return def_attr->AttrExpr()->IsConst() ? def_val->Ref() : def_val->Clone();
const Func* f = def_val->AsFunc();
val_list* vl = new val_list();
@ -2560,10 +2560,22 @@ RecordVal::RecordVal(RecordType* t) : MutableVal(t)
Attributes* a = record_type->FieldDecl(i)->attrs;
Attr* def_attr = a ? a->FindAttr(ATTR_DEFAULT) : 0;
Val* def = def_attr ? def_attr->AttrExpr()->Eval(0) : 0;
BroType* type = record_type->FieldDecl(i)->type;
if ( def && type->Tag() == TYPE_RECORD &&
def->Type()->Tag() == TYPE_RECORD &&
! same_type(def->Type(), type) )
{
Val* tmp = def->AsRecordVal()->CoerceTo(type->AsRecordType());
if ( tmp )
{
Unref(def);
def = tmp;
}
}
if ( ! def && ! (a && a->FindAttr(ATTR_OPTIONAL)) )
{
BroType* type = record_type->FieldDecl(i)->type;
TypeTag tag = type->Tag();
if ( tag == TYPE_RECORD )
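The script-level effect of the TableVal::Default change above (the new table-default-record btest exercises it): a lookup that misses now returns a clone of a non-const &default, so mutations through the result cannot corrupt the shared default value.

type Foo: record { x: count &default=0; };
global tbl: table[count] of Foo = {} &default=[];

event bro_init()
	{
	tbl[0]$x = 5;   # mutates the copy returned by the missed lookup...
	print tbl[0]$x; # ...so this still prints 0, and tbl stays empty
	}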

View file

@ -623,7 +623,7 @@ function md5_hmac%(...%): string
## ``md5_hash_update(c$http$md5_handle, some_more_data)`` in the
## :bro:id:`http_entity_data` event handler. When all data has arrived, a call
## to :bro:id:`md5_hash_finish` returns the final hash value.
##
## Returns: The opaque handle associated with this hash computation.
##
## .. bro:see:: md5_hmac md5_hash md5_hash_update md5_hash_finish
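A compact end-to-end use of the incremental interface described here, assuming the documented handle-returning initializer is md5_hash_init:

event bro_init()
	{
	local h = md5_hash_init();
	md5_hash_update(h, "some data");
	md5_hash_update(h, "some more data");
	print md5_hash_finish(h); # hex digest of the concatenated input
	}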
@ -2461,6 +2461,97 @@ function bytestring_to_double%(s: string%): double
return new Val(ntohd(d), TYPE_DOUBLE);
%}
## Converts a string of bytes to a :bro:type:`count`.
##
## s: A string of bytes containing the binary representation of the value.
##
## is_le: If true, *s* is interpreted as little-endian; otherwise as big-endian.
##
## Returns: The value contained in *s*, or 0 if the conversion failed.
##
function bytestring_to_count%(s: string, is_le: bool%): count
%{
#ifdef HOST_BIGENDIAN
static const bool host_bigendian = true;
#else
static const bool host_bigendian = false;
#endif
const u_char *p = s->Bytes();
unsigned int i;
switch ( s->Len() ) {
case sizeof(uint8):
{
uint8 value = 0;
memcpy(&value, p, sizeof(uint8));
return new Val(value, TYPE_COUNT);
}
case sizeof(uint16):
{
uint16 value = 0;
if ( (host_bigendian && is_le) || (! host_bigendian && ! is_le) )
{
char buf[sizeof(uint16)];
char *d = &buf[sizeof(uint16)-1];
for ( i = 0; i < sizeof(uint16); i++ )
*d-- = *p++;
memcpy(&value, buf, sizeof(uint16));
}
else
memcpy(&value, p, sizeof(uint16));
return new Val(value, TYPE_COUNT);
}
case sizeof(uint32):
{
uint32 value = 0;
if ( (host_bigendian && is_le) || (! host_bigendian && ! is_le) )
{
char buf[sizeof(uint32)];
char *d = &buf[sizeof(uint32)-1];
for ( i = 0; i < sizeof(uint32); i++ )
*d-- = *p++;
memcpy(&value, buf, sizeof(uint32));
}
else
memcpy(&value, p, sizeof(uint32));
return new Val(value, TYPE_COUNT);
}
case sizeof(uint64):
{
uint64 value = 0;
if ( (host_bigendian && is_le) || (! host_bigendian && ! is_le) )
{
char buf[sizeof(uint64)];
char *d = &buf[sizeof(uint64)-1];
for ( i = 0; i < sizeof(uint64); i++ )
*d-- = *p++;
memcpy(&value, buf, sizeof(uint64));
}
else
memcpy(&value, p, sizeof(uint64));
return new Val(value, TYPE_COUNT);
}
}
builtin_error("unsupported byte length for bytestring_to_count");
return new Val(0, TYPE_COUNT);
%}
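A few illustrative calls; the expected values match the btest added further down:

event bro_init()
	{
	print bytestring_to_count("\x03\xe8", F); # 1000 (big-endian)
	print bytestring_to_count("\xe8\x03", T); # 1000 (little-endian)
	print bytestring_to_count("\xAA\xBB\xCC", F); # 0, unsupported length
	}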
## Converts a reverse pointer name to an address. For example,
## ``1.0.168.192.in-addr.arpa`` to ``192.168.0.1``.
##

View file

@ -0,0 +1,36 @@
0
0
0
0
0
0
255
255
0
0
1000
1000
12345
12345
0
0
65535
65535
4294967295
4294967295
287454020
1144201745
255
255
2864429994
2864429994
0
0
18446744073709551615
18446744073709551615
18446742974214701055
18446742974214701055
65535
65535
0
0

View file

@ -3,7 +3,7 @@
#empty_field (empty)
#unset_field -
#path loaded_scripts
#open 2013-01-16-18-04-57
#open 2013-05-06-15-17-44
#fields name
#types string
scripts/base/init-bare.bro
@ -14,24 +14,25 @@ scripts/base/init-bare.bro
build/src/base/reporter.bif.bro
build/src/base/event.bif.bro
scripts/base/frameworks/logging/__load__.bro
scripts/base/frameworks/logging/./main.bro
scripts/base/frameworks/logging/main.bro
build/src/base/logging.bif.bro
scripts/base/frameworks/logging/./postprocessors/__load__.bro
scripts/base/frameworks/logging/./postprocessors/./scp.bro
scripts/base/frameworks/logging/./postprocessors/./sftp.bro
scripts/base/frameworks/logging/./writers/ascii.bro
scripts/base/frameworks/logging/./writers/dataseries.bro
scripts/base/frameworks/logging/./writers/elasticsearch.bro
scripts/base/frameworks/logging/./writers/none.bro
scripts/base/frameworks/logging/postprocessors/__load__.bro
scripts/base/frameworks/logging/postprocessors/scp.bro
scripts/base/frameworks/logging/postprocessors/sftp.bro
scripts/base/frameworks/logging/writers/ascii.bro
scripts/base/frameworks/logging/writers/dataseries.bro
scripts/base/frameworks/logging/writers/elasticsearch.bro
scripts/base/frameworks/logging/writers/none.bro
scripts/base/frameworks/input/__load__.bro
scripts/base/frameworks/input/./main.bro
scripts/base/frameworks/input/main.bro
build/src/base/input.bif.bro
scripts/base/frameworks/input/./readers/ascii.bro
scripts/base/frameworks/input/./readers/raw.bro
scripts/base/frameworks/input/./readers/benchmark.bro
scripts/base/frameworks/input/./readers/binary.bro
scripts/base/frameworks/input/readers/ascii.bro
scripts/base/frameworks/input/readers/raw.bro
scripts/base/frameworks/input/readers/benchmark.bro
scripts/base/frameworks/input/readers/binary.bro
scripts/base/frameworks/file-analysis/__load__.bro
scripts/base/frameworks/file-analysis/./main.bro
scripts/base/frameworks/file-analysis/main.bro
build/src/base/file_analysis.bif.bro
scripts/policy/misc/loaded-scripts.bro
#close 2013-01-16-18-04-57
scripts/base/utils/paths.bro
#close 2013-05-06-15-17-44

View file

@ -3,7 +3,7 @@
#empty_field (empty)
#unset_field -
#path loaded_scripts
#open 2013-02-11-18-44-43
#open 2013-05-06-15-17-58
#fields name
#types string
scripts/base/init-bare.bro
@ -14,117 +14,128 @@ scripts/base/init-bare.bro
build/src/base/reporter.bif.bro
build/src/base/event.bif.bro
scripts/base/frameworks/logging/__load__.bro
scripts/base/frameworks/logging/./main.bro
scripts/base/frameworks/logging/main.bro
build/src/base/logging.bif.bro
scripts/base/frameworks/logging/./postprocessors/__load__.bro
scripts/base/frameworks/logging/./postprocessors/./scp.bro
scripts/base/frameworks/logging/./postprocessors/./sftp.bro
scripts/base/frameworks/logging/./writers/ascii.bro
scripts/base/frameworks/logging/./writers/dataseries.bro
scripts/base/frameworks/logging/./writers/elasticsearch.bro
scripts/base/frameworks/logging/./writers/none.bro
scripts/base/frameworks/logging/postprocessors/__load__.bro
scripts/base/frameworks/logging/postprocessors/scp.bro
scripts/base/frameworks/logging/postprocessors/sftp.bro
scripts/base/frameworks/logging/writers/ascii.bro
scripts/base/frameworks/logging/writers/dataseries.bro
scripts/base/frameworks/logging/writers/elasticsearch.bro
scripts/base/frameworks/logging/writers/none.bro
scripts/base/frameworks/input/__load__.bro
scripts/base/frameworks/input/./main.bro
scripts/base/frameworks/input/main.bro
build/src/base/input.bif.bro
scripts/base/frameworks/input/./readers/ascii.bro
scripts/base/frameworks/input/./readers/raw.bro
scripts/base/frameworks/input/./readers/benchmark.bro
scripts/base/frameworks/input/./readers/binary.bro
scripts/base/frameworks/input/readers/ascii.bro
scripts/base/frameworks/input/readers/raw.bro
scripts/base/frameworks/input/readers/benchmark.bro
scripts/base/frameworks/input/readers/binary.bro
scripts/base/frameworks/file-analysis/__load__.bro
scripts/base/frameworks/file-analysis/./main.bro
scripts/base/frameworks/file-analysis/main.bro
build/src/base/file_analysis.bif.bro
scripts/base/init-default.bro
scripts/base/utils/site.bro
scripts/base/utils/./patterns.bro
scripts/base/utils/patterns.bro
scripts/base/utils/addrs.bro
scripts/base/utils/conn-ids.bro
scripts/base/utils/directions-and-hosts.bro
scripts/base/utils/files.bro
scripts/base/utils/numbers.bro
scripts/base/utils/paths.bro
scripts/base/utils/queue.bro
scripts/base/utils/strings.bro
scripts/base/utils/thresholds.bro
scripts/base/utils/time.bro
scripts/base/utils/urls.bro
scripts/base/frameworks/notice/__load__.bro
scripts/base/frameworks/notice/./main.bro
scripts/base/frameworks/notice/./weird.bro
scripts/base/frameworks/notice/./actions/drop.bro
scripts/base/frameworks/notice/./actions/email_admin.bro
scripts/base/frameworks/notice/./actions/page.bro
scripts/base/frameworks/notice/./actions/add-geodata.bro
scripts/base/frameworks/notice/./extend-email/hostnames.bro
scripts/base/frameworks/notice/main.bro
scripts/base/frameworks/notice/weird.bro
scripts/base/frameworks/notice/actions/drop.bro
scripts/base/frameworks/notice/actions/email_admin.bro
scripts/base/frameworks/notice/actions/page.bro
scripts/base/frameworks/notice/actions/add-geodata.bro
scripts/base/frameworks/notice/extend-email/hostnames.bro
scripts/base/frameworks/cluster/__load__.bro
scripts/base/frameworks/cluster/./main.bro
scripts/base/frameworks/cluster/main.bro
scripts/base/frameworks/control/__load__.bro
scripts/base/frameworks/control/./main.bro
scripts/base/frameworks/notice/./non-cluster.bro
scripts/base/frameworks/notice/./actions/pp-alarms.bro
scripts/base/frameworks/control/main.bro
scripts/base/frameworks/notice/non-cluster.bro
scripts/base/frameworks/notice/actions/pp-alarms.bro
scripts/base/frameworks/dpd/__load__.bro
scripts/base/frameworks/dpd/./main.bro
scripts/base/frameworks/dpd/main.bro
scripts/base/frameworks/signatures/__load__.bro
scripts/base/frameworks/signatures/./main.bro
scripts/base/frameworks/signatures/main.bro
scripts/base/frameworks/packet-filter/__load__.bro
scripts/base/frameworks/packet-filter/./main.bro
scripts/base/frameworks/packet-filter/./netstats.bro
scripts/base/frameworks/packet-filter/main.bro
scripts/base/frameworks/packet-filter/netstats.bro
scripts/base/frameworks/software/__load__.bro
scripts/base/frameworks/software/./main.bro
scripts/base/frameworks/software/main.bro
scripts/base/frameworks/communication/__load__.bro
scripts/base/frameworks/communication/./main.bro
scripts/base/frameworks/metrics/__load__.bro
scripts/base/frameworks/metrics/./main.bro
scripts/base/frameworks/metrics/./non-cluster.bro
scripts/base/frameworks/communication/main.bro
scripts/base/frameworks/intel/__load__.bro
scripts/base/frameworks/intel/./main.bro
scripts/base/frameworks/intel/./input.bro
scripts/base/frameworks/intel/main.bro
scripts/base/frameworks/intel/input.bro
scripts/base/frameworks/reporter/__load__.bro
scripts/base/frameworks/reporter/./main.bro
scripts/base/frameworks/reporter/main.bro
scripts/base/frameworks/sumstats/__load__.bro
scripts/base/frameworks/sumstats/main.bro
scripts/base/frameworks/sumstats/plugins/__load__.bro
scripts/base/frameworks/sumstats/plugins/average.bro
scripts/base/frameworks/sumstats/plugins/max.bro
scripts/base/frameworks/sumstats/plugins/min.bro
scripts/base/frameworks/sumstats/plugins/sample.bro
scripts/base/frameworks/sumstats/plugins/std-dev.bro
scripts/base/frameworks/sumstats/plugins/variance.bro
scripts/base/frameworks/sumstats/plugins/sum.bro
scripts/base/frameworks/sumstats/plugins/unique.bro
scripts/base/frameworks/sumstats/non-cluster.bro
scripts/base/frameworks/tunnels/__load__.bro
scripts/base/frameworks/tunnels/./main.bro
scripts/base/frameworks/tunnels/main.bro
scripts/base/protocols/conn/__load__.bro
scripts/base/protocols/conn/./main.bro
scripts/base/protocols/conn/./contents.bro
scripts/base/protocols/conn/./inactivity.bro
scripts/base/protocols/conn/./polling.bro
scripts/base/protocols/conn/main.bro
scripts/base/protocols/conn/contents.bro
scripts/base/protocols/conn/inactivity.bro
scripts/base/protocols/conn/polling.bro
scripts/base/protocols/dns/__load__.bro
scripts/base/protocols/dns/./consts.bro
scripts/base/protocols/dns/./main.bro
scripts/base/protocols/dns/consts.bro
scripts/base/protocols/dns/main.bro
scripts/base/protocols/ftp/__load__.bro
scripts/base/protocols/ftp/./utils-commands.bro
scripts/base/protocols/ftp/./main.bro
scripts/base/protocols/ftp/./file-analysis.bro
scripts/base/protocols/ftp/./file-extract.bro
scripts/base/protocols/ftp/./gridftp.bro
scripts/base/protocols/ftp/utils-commands.bro
scripts/base/protocols/ftp/main.bro
scripts/base/protocols/ftp/file-analysis.bro
scripts/base/protocols/ftp/file-extract.bro
scripts/base/protocols/ftp/gridftp.bro
scripts/base/protocols/ssl/__load__.bro
scripts/base/protocols/ssl/./consts.bro
scripts/base/protocols/ssl/./main.bro
scripts/base/protocols/ssl/./mozilla-ca-list.bro
scripts/base/protocols/ssl/consts.bro
scripts/base/protocols/ssl/main.bro
scripts/base/protocols/ssl/mozilla-ca-list.bro
scripts/base/protocols/http/__load__.bro
scripts/base/protocols/http/./main.bro
scripts/base/protocols/http/./utils.bro
scripts/base/protocols/http/./file-analysis.bro
scripts/base/protocols/http/./file-ident.bro
scripts/base/protocols/http/./file-hash.bro
scripts/base/protocols/http/./file-extract.bro
scripts/base/protocols/http/main.bro
scripts/base/protocols/http/utils.bro
scripts/base/protocols/http/file-analysis.bro
scripts/base/protocols/http/file-ident.bro
scripts/base/protocols/http/file-hash.bro
scripts/base/protocols/http/file-extract.bro
scripts/base/protocols/irc/__load__.bro
scripts/base/protocols/irc/./main.bro
scripts/base/protocols/irc/./dcc-send.bro
scripts/base/protocols/irc/./file-analysis.bro
scripts/base/protocols/irc/main.bro
scripts/base/protocols/irc/dcc-send.bro
scripts/base/protocols/irc/file-analysis.bro
scripts/base/protocols/modbus/__load__.bro
scripts/base/protocols/modbus/./consts.bro
scripts/base/protocols/modbus/./main.bro
scripts/base/protocols/modbus/consts.bro
scripts/base/protocols/modbus/main.bro
scripts/base/protocols/smtp/__load__.bro
scripts/base/protocols/smtp/./main.bro
scripts/base/protocols/smtp/./entities.bro
scripts/base/protocols/smtp/./entities-excerpt.bro
scripts/base/protocols/smtp/./file-analysis.bro
scripts/base/protocols/smtp/main.bro
scripts/base/protocols/smtp/entities.bro
scripts/base/protocols/smtp/entities-excerpt.bro
scripts/base/protocols/smtp/file-analysis.bro
scripts/base/protocols/socks/__load__.bro
scripts/base/protocols/socks/./consts.bro
scripts/base/protocols/socks/./main.bro
scripts/base/protocols/socks/consts.bro
scripts/base/protocols/socks/main.bro
scripts/base/protocols/ssh/__load__.bro
scripts/base/protocols/ssh/./main.bro
scripts/base/protocols/ssh/main.bro
scripts/base/protocols/syslog/__load__.bro
scripts/base/protocols/syslog/./consts.bro
scripts/base/protocols/syslog/./main.bro
scripts/base/protocols/syslog/consts.bro
scripts/base/protocols/syslog/main.bro
scripts/base/misc/find-checksum-offloading.bro
scripts/policy/misc/loaded-scripts.bro
#close 2013-02-11-18-44-43
#close 2013-05-06-15-17-58

View file

@ -3,5 +3,5 @@
-./frameworks/cluster/nodes/worker.bro
-./frameworks/cluster/setup-connections.bro
-./frameworks/intel/cluster.bro
-./frameworks/metrics/cluster.bro
-./frameworks/notice/cluster.bro
-./frameworks/sumstats/cluster.bro

View file

@ -1,4 +1,6 @@
event statement
event part1
event part2
schedule statement
schedule statement in bro_init
schedule statement in global
schedule statement another in bro_init

View file

@ -1,3 +1,21 @@
[bar=4321, foo=[foo=1234, quux=9876]]
[foo=1234, quux=9876]
9876
[bar=4231, foo=[foo=1000, quux=9876]]
[foo=1000, quux=9876]
9876
[bar=4321, foo=[foo=10, quux=42]]
[foo=10, quux=42]
42
[bar=100, foo=[foo=1234, quux=9876]]
[foo=1234, quux=9876]
9876
[bar=100, foo=[foo=1001, quux=9876]]
[foo=1001, quux=9876]
9876
[bar=100, foo=[foo=11, quux=7]]
[foo=11, quux=7]
7
[a=13, c=13, v=[]]
0
[a=13, c=13, v=[test]]

View file

@ -0,0 +1,7 @@
0
0
0
0
{
}

View file

@ -1,12 +0,0 @@
#separator \x09
#set_separator ,
#empty_field (empty)
#unset_field -
#path metrics
#open 2012-07-20-01-50-41
#fields ts metric_id filter_name index.host index.str index.network value
#types time enum string addr string subnet count
1342749041.601712 TEST_METRIC foo-bar 6.5.4.3 - - 4
1342749041.601712 TEST_METRIC foo-bar 7.2.1.5 - - 2
1342749041.601712 TEST_METRIC foo-bar 1.2.3.4 - - 6
#close 2012-07-20-01-50-49

View file

@ -1,12 +0,0 @@
#separator \x09
#set_separator ,
#empty_field (empty)
#unset_field -
#path metrics
#open 2012-07-20-01-49-22
#fields ts metric_id filter_name index.host index.str index.network value
#types time enum string addr string subnet count
1342748962.841548 TEST_METRIC foo-bar 6.5.4.3 - - 2
1342748962.841548 TEST_METRIC foo-bar 7.2.1.5 - - 1
1342748962.841548 TEST_METRIC foo-bar 1.2.3.4 - - 3
#close 2012-07-20-01-49-22

View file

@ -1,10 +0,0 @@
#separator \x09
#set_separator ,
#empty_field (empty)
#unset_field -
#path notice
#open 2013-02-11-18-41-03
#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto note msg sub src dst p n peer_descr actions suppress_for dropped remote_location.country_code remote_location.region remote_location.city remote_location.latitude remote_location.longitude metric_index.host metric_index.str metric_index.network
#types time string addr port addr port enum enum string string addr addr port count string table[enum] interval bool string string string double double addr string subnet
1360608063.517719 - - - - - - Test_Notice Threshold crossed by metric_index(host=1.2.3.4) 100/100 - 1.2.3.4 - - 100 manager-1 Notice::ACTION_LOG 3600.000000 F - - - - - 1.2.3.4 - -
#close 2013-02-11-18-41-03

View file

@ -1,11 +0,0 @@
#separator \x09
#set_separator ,
#empty_field (empty)
#unset_field -
#path notice
#open 2012-07-20-01-49-23
#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto note msg sub src dst p n peer_descr actions policy_items suppress_for dropped remote_location.country_code remote_location.region remote_location.city remote_location.latitude remote_location.longitude metric_index.host metric_index.str metric_index.network
#types time string addr port addr port enum enum string string addr addr port count string table[enum] table[count] interval bool string string string double double addr string subnet
1342748963.085888 - - - - - - Test_Notice Threshold crossed by metric_index(host=1.2.3.4) 3/2 - 1.2.3.4 - - 3 bro Notice::ACTION_LOG 6 3600.000000 F - - - - - 1.2.3.4 - -
1342748963.085888 - - - - - - Test_Notice Threshold crossed by metric_index(host=6.5.4.3) 2/2 - 6.5.4.3 - - 2 bro Notice::ACTION_LOG 6 3600.000000 F - - - - - 6.5.4.3 - -
#close 2012-07-20-01-49-23

View file

@ -3,8 +3,8 @@
#empty_field (empty)
#unset_field -
#path notice
#open 2013-02-11-18-45-43
#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto note msg sub src dst p n peer_descr actions suppress_for dropped remote_location.country_code remote_location.region remote_location.city remote_location.latitude remote_location.longitude metric_index.host metric_index.str metric_index.network
#types time string addr port addr port enum enum string string addr addr port count string table[enum] interval bool string string string double double addr string subnet
1360608343.088948 - - - - - - Test_Notice test notice! - - - - - worker-1 Notice::ACTION_LOG 3600.000000 F - - - - - - - -
#close 2013-02-11-18-45-43
#open 2013-04-02-02-21-00
#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto note msg sub src dst p n peer_descr actions suppress_for dropped remote_location.country_code remote_location.region remote_location.city remote_location.latitude remote_location.longitude
#types time string addr port addr port enum enum string string addr addr port count string table[enum] interval bool string string string double double
1364869260.950557 - - - - - - Test_Notice test notice! - - - - - worker-1 Notice::ACTION_LOG 3600.000000 F - - - - -
#close 2013-04-02-02-21-00

View file

@ -3,8 +3,8 @@
#empty_field (empty)
#unset_field -
#path notice
#open 2013-02-11-18-45-14
#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto note msg sub src dst p n peer_descr actions suppress_for dropped remote_location.country_code remote_location.region remote_location.city remote_location.latitude remote_location.longitude metric_index.host metric_index.str metric_index.network
#types time string addr port addr port enum enum string string addr addr port count string table[enum] interval bool string string string double double addr string subnet
1360608314.794257 - - - - - - Test_Notice test notice! - - - - - worker-2 Notice::ACTION_LOG 3600.000000 F - - - - - - - -
#close 2013-02-11-18-45-17
#open 2013-04-02-02-21-29
#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto note msg sub src dst p n peer_descr actions suppress_for dropped remote_location.country_code remote_location.region remote_location.city remote_location.latitude remote_location.longitude
#types time string addr port addr port enum enum string string addr addr port count string table[enum] interval bool string string string double double
1364869289.545369 - - - - - - Test_Notice test notice! - - - - - worker-2 Notice::ACTION_LOG 3600.000000 F - - - - -
#close 2013-04-02-02-21-32

View file

@ -0,0 +1,4 @@
Host: 6.5.4.3 - num:2 - sum:6.0 - avg:3.0 - max:5.0 - min:1.0 - var:8.0 - std_dev:2.8 - unique:2
Host: 10.10.10.10 - num:1 - sum:5.0 - avg:5.0 - max:5.0 - min:5.0 - var:0.0 - std_dev:0.0 - unique:1
Host: 1.2.3.4 - num:9 - sum:437.0 - avg:48.6 - max:95.0 - min:3.0 - var:758.8 - std_dev:27.5 - unique:8
Host: 7.2.1.5 - num:2 - sum:145.0 - avg:72.5 - max:91.0 - min:54.0 - var:684.5 - std_dev:26.2 - unique:2

View file

@ -0,0 +1,3 @@
Host: 6.5.4.3 - num:1 - sum:2.0 - var:0.0 - avg:2.0 - max:2.0 - min:2.0 - std_dev:0.0 - unique:1
Host: 1.2.3.4 - num:5 - sum:221.0 - var:1144.2 - avg:44.2 - max:94.0 - min:5.0 - std_dev:33.8 - unique:4
Host: 7.2.1.5 - num:1 - sum:1.0 - var:0.0 - avg:1.0 - max:1.0 - min:1.0 - std_dev:0.0 - unique:1

View file

@ -0,0 +1,3 @@
A test metric threshold was crossed with a value of: 101.0
End of epoch handler was called
101.0

View file

@ -0,0 +1,6 @@
THRESHOLD_SERIES: hit a threshold series value at 3 for sumstats_key(host=1.2.3.4)
THRESHOLD_SERIES: hit a threshold series value at 6 for sumstats_key(host=1.2.3.4)
THRESHOLD: hit a threshold value at 6 for sumstats_key(host=1.2.3.4)
THRESHOLD_SERIES: hit a threshold series value at 1001 for sumstats_key(host=7.2.1.5)
THRESHOLD: hit a threshold value at 1001 for sumstats_key(host=7.2.1.5)
THRESHOLD WITH RATIO BETWEEN REDUCERS: hit a threshold value at 55x for sumstats_key(host=7.2.1.5)

View file

@ -3,8 +3,8 @@
#empty_field (empty)
#unset_field -
#path notice
#open 2013-02-11-18-33-41
#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto note msg sub src dst p n peer_descr actions suppress_for dropped remote_location.country_code remote_location.region remote_location.city remote_location.latitude remote_location.longitude metric_index.host metric_index.str metric_index.network
#types time string addr port addr port enum enum string string addr addr port count string table[enum] interval bool string string string double double addr string subnet
1348168976.558309 arKYeMETxOg 192.168.57.103 35391 192.168.57.101 55968 tcp GridFTP::Data_Channel GridFTP data channel over threshold 2 bytes - 192.168.57.103 192.168.57.101 55968 - bro Notice::ACTION_LOG 3600.000000 F - - - - - - - -
#close 2013-02-11-18-33-41
#open 2013-04-02-02-19-21
#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto note msg sub src dst p n peer_descr actions suppress_for dropped remote_location.country_code remote_location.region remote_location.city remote_location.latitude remote_location.longitude
#types time string addr port addr port enum enum string string addr addr port count string table[enum] interval bool string string string double double
1348168976.558309 arKYeMETxOg 192.168.57.103 35391 192.168.57.101 55968 tcp GridFTP::Data_Channel GridFTP data channel over threshold 2 bytes - 192.168.57.103 192.168.57.101 55968 - bro Notice::ACTION_LOG 3600.000000 F - - - - -
#close 2013-04-02-02-19-21

View file

@ -3,8 +3,8 @@
#empty_field (empty)
#unset_field -
#path socks
#open 2012-06-20-17-23-38
#open 2013-05-02-01-02-50
#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p version user status request.host request.name request_p bound.host bound.name bound_p
#types time string addr port addr port count string string addr string port addr string port
1340213015.276495 UWkUyAuUGXf 10.0.0.55 53994 60.190.189.214 8124 5 - succeeded - www.osnews.com 80 192.168.0.31 - 2688
#close 2012-06-20-17-28-10
1340213015.276495 arKYeMETxOg 10.0.0.55 53994 60.190.189.214 8124 5 - succeeded - www.osnews.com 80 192.168.0.31 - 2688
#close 2013-05-02-01-02-50

View file

@ -0,0 +1,9 @@
This is a get_vector test: 3
This is a get_vector test: 4
Testing get: 3
Length after get: 1
Size of q2: 4
String queue value: test 1
String queue value: test 2
String queue value: test 2
String queue value: test 1

View file

@ -0,0 +1,11 @@
#separator \x09
#set_separator ,
#empty_field (empty)
#unset_field -
#path notice
#open 2013-04-28-22-36-26
#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto note msg sub src dst p n peer_descr actions suppress_for dropped remote_location.country_code remote_location.region remote_location.city remote_location.latitude remote_location.longitude
#types time string addr port addr port enum enum string string addr addr port count string table[enum] interval bool string string string double double
1367188586.649122 - - - - - - Software::Vulnerable_Version 1.2.3.4 is running Java 1.7.0.15 which is vulnerable. Java 1.7.0.15 1.2.3.4 - - - bro Notice::ACTION_LOG 3600.000000 F - - - - -
1367188586.649122 - - - - - - Software::Vulnerable_Version 1.2.3.5 is running Java 1.6.0.43 which is vulnerable. Java 1.6.0.43 1.2.3.5 - - - bro Notice::ACTION_LOG 3600.000000 F - - - - -
#close 2013-04-28-22-36-26

View file

@ -0,0 +1,55 @@
#
# @TEST-EXEC: bro -b %INPUT >out
# @TEST-EXEC: btest-diff out
event bro_init()
{
# unsupported byte lengths
print bytestring_to_count("", T); # 0
print bytestring_to_count("", F); # 0
print bytestring_to_count("\xAA\xBB\xCC", T); # 0
print bytestring_to_count("\xAA\xBB\xCC", F); # 0
print bytestring_to_count("\xAA\xBB\xCC\xDD\xEE", T); # 0
print bytestring_to_count("\xAA\xBB\xCC\xDD\xEE", F); # 0
# 8 bit
print bytestring_to_count("\xff", T); # 255
print bytestring_to_count("\xff", F); # 255
print bytestring_to_count("\x00", T); # 0
print bytestring_to_count("\x00", F); # 0
# 16 bit
print bytestring_to_count("\x03\xe8", F); # 1000
print bytestring_to_count("\xe8\x03", T); # 1000
print bytestring_to_count("\x30\x39", F); # 12345
print bytestring_to_count("\x39\x30", T); # 12345
print bytestring_to_count("\x00\x00", F); # 0
print bytestring_to_count("\x00\x00", T); # 0
# 32 bit
print bytestring_to_count("\x00\x00\xff\xff", F); # 65535
print bytestring_to_count("\xff\xff\x00\x00", T); # 65535
print bytestring_to_count("\xff\xff\xff\xff", F); # 4294967295
print bytestring_to_count("\xff\xff\xff\xff", T); # 4294967295
print bytestring_to_count("\x11\x22\x33\x44", F); # 287454020
print bytestring_to_count("\x11\x22\x33\x44", T); # 1144201745
print bytestring_to_count("\x00\x00\x00\xff", F); # 255
print bytestring_to_count("\xff\x00\x00\x00", T); # 255
print bytestring_to_count("\xAA\xBB\xBB\xAA", F); # 2864429994
print bytestring_to_count("\xAA\xBB\xBB\xAA", T); # 2864429994
print bytestring_to_count("\x00\x00\x00\x00", F); # 0
print bytestring_to_count("\x00\x00\x00\x00", T); # 0
# 64 bit
print bytestring_to_count("\xff\xff\xff\xff\xff\xff\xff\xff", F); # 18446744073709551615
print bytestring_to_count("\xff\xff\xff\xff\xff\xff\xff\xff", T); # 18446744073709551615
print bytestring_to_count("\xff\xff\xff\x00\x00\xff\xff\xff", F); # 18446742974214701055
print bytestring_to_count("\xff\xff\xff\x00\x00\xff\xff\xff", T); # 18446742974214701055
print bytestring_to_count("\x00\x00\x00\x00\x00\x00\xff\xff", F); # 65535
print bytestring_to_count("\xff\xff\x00\x00\x00\x00\x00\x00", T); # 65535
print bytestring_to_count("\x00\x00\x00\x00\x00\x00\x00\x00", T); # 0
print bytestring_to_count("\x00\x00\x00\x00\x00\x00\x00\x00", F); # 0
}

View file

@ -6,33 +6,38 @@
# @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks
#
# @TEST-EXEC: btest-bg-run manager-1 HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local BROPATH=$BROPATH:.. CLUSTER_NODE=manager-1 bro -m %INPUT
# @TEST-EXEC: btest-bg-run proxy-1 HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local BROPATH=$BROPATH:.. CLUSTER_NODE=proxy-1 bro -m %INPUT
# @TEST-EXEC: sleep 1
# @TEST-EXEC: btest-bg-run worker-1 HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local BROPATH=$BROPATH:.. CLUSTER_NODE=worker-1 bro -m -r $TRACES/web.trace --pseudo-realtime %INPUT
# @TEST-EXEC: btest-bg-run worker-2 HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local BROPATH=$BROPATH:.. CLUSTER_NODE=worker-2 bro -m -r $TRACES/web.trace --pseudo-realtime %INPUT
# @TEST-EXEC: btest-bg-wait 60
# @TEST-EXEC: btest-diff manager-1/metrics.log
# @TEST-EXEC: btest-bg-run worker-1 HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local BROPATH=$BROPATH:.. CLUSTER_NODE=worker-1 bro -m %INPUT
# @TEST-EXEC: btest-bg-run worker-2 HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local BROPATH=$BROPATH:.. CLUSTER_NODE=worker-2 bro -m %INPUT
# @TEST-EXEC: btest-bg-wait 15
@TEST-START-FILE cluster-layout.bro
redef Cluster::nodes = {
["manager-1"] = [$node_type=Cluster::MANAGER, $ip=127.0.0.1, $p=37757/tcp, $workers=set("worker-1", "worker-2")],
["proxy-1"] = [$node_type=Cluster::PROXY, $ip=127.0.0.1, $p=37758/tcp, $manager="manager-1", $workers=set("worker-1", "worker-2")],
["worker-1"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=37760/tcp, $manager="manager-1", $proxy="proxy-1", $interface="eth0"],
["worker-2"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=37761/tcp, $manager="manager-1", $proxy="proxy-1", $interface="eth1"],
["worker-1"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=37760/tcp, $manager="manager-1", $interface="eth0"],
["worker-2"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=37761/tcp, $manager="manager-1", $interface="eth1"],
};
@TEST-END-FILE
redef Log::default_rotation_interval = 0secs;
redef enum Metrics::ID += {
TEST_METRIC,
};
global n = 0;
event bro_init() &priority=5
{
Metrics::add_filter(TEST_METRIC,
[$name="foo-bar",
$break_interval=3secs]);
local r1: SumStats::Reducer = [$stream="test", $apply=set(SumStats::SUM, SumStats::MIN, SumStats::MAX, SumStats::AVERAGE, SumStats::STD_DEV, SumStats::VARIANCE, SumStats::UNIQUE)];
SumStats::create([$epoch=5secs,
$reducers=set(r1),
$epoch_finished(rt: SumStats::ResultTable) =
{
for ( key in rt )
{
local r = rt[key]["test"];
print fmt("Host: %s - num:%d - sum:%.1f - avg:%.1f - max:%.1f - min:%.1f - var:%.1f - std_dev:%.1f - unique:%d", key$host, r$num, r$sum, r$average, r$max, r$min, r$variance, r$std_dev, r$unique);
}
terminate();
}]);
}
event remote_connection_closed(p: event_peer)
@ -41,43 +46,40 @@ event remote_connection_closed(p: event_peer)
}
global ready_for_data: event();
redef Cluster::manager2worker_events += /ready_for_data/;
@if ( Cluster::local_node_type() == Cluster::WORKER )
redef Cluster::manager2worker_events += /^ready_for_data$/;
event ready_for_data()
{
Metrics::add_data(TEST_METRIC, [$host=1.2.3.4], 3);
Metrics::add_data(TEST_METRIC, [$host=6.5.4.3], 2);
Metrics::add_data(TEST_METRIC, [$host=7.2.1.5], 1);
if ( Cluster::node == "worker-1" )
{
SumStats::observe("test", [$host=1.2.3.4], [$num=34]);
SumStats::observe("test", [$host=1.2.3.4], [$num=30]);
SumStats::observe("test", [$host=6.5.4.3], [$num=1]);
SumStats::observe("test", [$host=7.2.1.5], [$num=54]);
}
if ( Cluster::node == "worker-2" )
{
SumStats::observe("test", [$host=1.2.3.4], [$num=75]);
SumStats::observe("test", [$host=1.2.3.4], [$num=30]);
SumStats::observe("test", [$host=1.2.3.4], [$num=3]);
SumStats::observe("test", [$host=1.2.3.4], [$num=57]);
SumStats::observe("test", [$host=1.2.3.4], [$num=52]);
SumStats::observe("test", [$host=1.2.3.4], [$num=61]);
SumStats::observe("test", [$host=1.2.3.4], [$num=95]);
SumStats::observe("test", [$host=6.5.4.3], [$num=5]);
SumStats::observe("test", [$host=7.2.1.5], [$num=91]);
SumStats::observe("test", [$host=10.10.10.10], [$num=5]);
}
}
@endif
@if ( Cluster::local_node_type() == Cluster::MANAGER )
global n = 0;
global peer_count = 0;
event Metrics::log_metrics(rec: Metrics::Info)
event remote_connection_handshake_done(p: event_peer) &priority=-5
{
n = n + 1;
if ( n == 3 )
{
terminate_communication();
terminate();
}
}
event remote_connection_handshake_done(p: event_peer)
{
print p;
peer_count = peer_count + 1;
if ( peer_count == 3 )
{
++peer_count;
if ( peer_count == 2 )
event ready_for_data();
}
}
@endif

View file

@ -3,12 +3,13 @@
# scripts that block after loading, e.g. start listening on a socket.
#
# Commonly, this test may fail if one forgets to @load some base/ scripts
# when writing a new bro script.
# when writing a new bro script. Look into "allerrors" to find out
# which script had trouble.
#
# @TEST-SERIALIZE: comm
#
# @TEST-EXEC: test -d $DIST/scripts
# @TEST-EXEC: for script in `find $DIST/scripts/ -name \*\.bro -not -path '*/site/*'`; do echo $script; if echo "$script" | egrep -q 'communication/listen|controllee'; then rm -rf load_attempt .bgprocs; btest-bg-run load_attempt bro -b $script; btest-bg-wait -k 2; cat load_attempt/.stderr >>allerrors; else bro -b $script 2>>allerrors; fi done || exit 0
# @TEST-EXEC: cat allerrors | grep -v "received termination signal" | sort | uniq > unique_errors
# @TEST-EXEC: for script in `find $DIST/scripts/ -name \*\.bro -not -path '*/site/*'`; do echo "=== $script" >>allerrors; if echo "$script" | egrep -q 'communication/listen|controllee'; then rm -rf load_attempt .bgprocs; btest-bg-run load_attempt bro -b $script; btest-bg-wait -k 2; cat load_attempt/.stderr >>allerrors; else bro -b $script 2>>allerrors; fi done || exit 0
# @TEST-EXEC: cat allerrors | grep -v "received termination signal" | grep -v '===' | sort | uniq > unique_errors
# @TEST-EXEC: if [ $(grep -c LibCURL_INCLUDE_DIR-NOTFOUND $BUILD/CMakeCache.txt) -ne 0 ]; then cp unique_errors unique_errors_no_elasticsearch; fi
# @TEST-EXEC: if [ $(grep -c LibCURL_INCLUDE_DIR-NOTFOUND $BUILD/CMakeCache.txt) -ne 0 ]; then btest-diff unique_errors_no_elasticsearch; else btest-diff unique_errors; fi

View file

@ -9,9 +9,9 @@ event e1()
print "Error: this should not happen";
}
event e2()
event e2(s: string)
{
print "schedule statement";
print fmt("schedule statement %s", s);
}
event e3(test: string)
@ -36,7 +36,8 @@ event bro_init()
event e1();
# Test calling an event with "schedule" statement
schedule 1 sec { e2() };
schedule 1 sec { e2("in bro_init") };
schedule 3 sec { e2("another in bro_init") };
# Test calling an event that has two separate definitions
event e3("foo");
@ -47,3 +48,5 @@ event bro_init()
event e5(6); # TODO: this does not do anything
}
# Scheduling outside of an event handler shouldn't crash.
schedule 2sec { e2("in global") };

View file

@ -7,12 +7,42 @@ type MyRecord: record {
v: vector of string &default=vector();
};
event bro_init()
type Foo: record {
foo: count;
quux: count &default=9876;
};
type Bar: record {
bar: count;
foo: Foo &default=[$foo=1234];
};
function print_bar(b: Bar)
{
local r: MyRecord = [$c=13];
print r;
print |r$v|;
r$v[|r$v|] = "test";
print r;
print |r$v|;
print b;
print b$foo;
print b$foo$quux;
}
global bar: Bar = [$bar=4321];
global bar2: Bar = [$bar=4231, $foo=[$foo=1000]];
global bar3: Bar = [$bar=4321, $foo=[$foo=10, $quux=42]];
print_bar(bar);
print_bar(bar2);
print_bar(bar3);
local bar4: Bar = [$bar=100];
local bar5: Bar = [$bar=100, $foo=[$foo=1001]];
local bar6: Bar = [$bar=100, $foo=[$foo=11, $quux=7]];
print_bar(bar4);
print_bar(bar5);
print_bar(bar6);
local r: MyRecord = [$c=13];
print r;
print |r$v|;
r$v[|r$v|] = "test";
print r;
print |r$v|;

View file

@ -0,0 +1,24 @@
# @TEST-EXEC: bro -b %INPUT >out
# @TEST-EXEC: btest-diff out
type Foo: record {
x: count &default=0;
};
global foo: table[count] of Foo = {} &default=[];
# returns the &default value as usual
print(foo[0]$x);
print(foo[1]$x);
# these are essentially no-ops since a copy of the &default value is returned
# by the lookup
foo[0]$x = 0;
foo[1]$x = 1;
# the &default value isn't modified
print(foo[0]$x);
print(foo[1]$x);
# table membership isn't modified
print(foo);

View file

@ -1,78 +0,0 @@
# @TEST-SERIALIZE: comm
#
# @TEST-EXEC: btest-bg-run manager-1 BROPATH=$BROPATH:.. CLUSTER_NODE=manager-1 bro %INPUT
# @TEST-EXEC: btest-bg-run proxy-1 BROPATH=$BROPATH:.. CLUSTER_NODE=proxy-1 bro %INPUT
# @TEST-EXEC: sleep 1
# @TEST-EXEC: btest-bg-run worker-1 BROPATH=$BROPATH:.. CLUSTER_NODE=worker-1 bro %INPUT
# @TEST-EXEC: btest-bg-run worker-2 BROPATH=$BROPATH:.. CLUSTER_NODE=worker-2 bro %INPUT
# @TEST-EXEC: btest-bg-wait 30
# @TEST-EXEC: btest-diff manager-1/metrics.log
@TEST-START-FILE cluster-layout.bro
redef Cluster::nodes = {
["manager-1"] = [$node_type=Cluster::MANAGER, $ip=127.0.0.1, $p=37757/tcp, $workers=set("worker-1", "worker-2")],
["proxy-1"] = [$node_type=Cluster::PROXY, $ip=127.0.0.1, $p=37758/tcp, $manager="manager-1", $workers=set("worker-1", "worker-2")],
["worker-1"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=37760/tcp, $manager="manager-1", $proxy="proxy-1", $interface="eth0"],
["worker-2"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=37761/tcp, $manager="manager-1", $proxy="proxy-1", $interface="eth1"],
};
@TEST-END-FILE
redef Log::default_rotation_interval = 0secs;
redef enum Metrics::ID += {
TEST_METRIC,
};
event bro_init() &priority=5
{
Metrics::add_filter(TEST_METRIC,
[$name="foo-bar",
$break_interval=3secs]);
}
event remote_connection_closed(p: event_peer)
{
terminate();
}
global ready_for_data: event();
redef Cluster::manager2worker_events += /ready_for_data/;
@if ( Cluster::local_node_type() == Cluster::WORKER )
event ready_for_data()
{
Metrics::add_data(TEST_METRIC, [$host=1.2.3.4], 3);
Metrics::add_data(TEST_METRIC, [$host=6.5.4.3], 2);
Metrics::add_data(TEST_METRIC, [$host=7.2.1.5], 1);
}
@endif
@if ( Cluster::local_node_type() == Cluster::MANAGER )
global n = 0;
global peer_count = 0;
event Metrics::log_metrics(rec: Metrics::Info)
{
n = n + 1;
if ( n == 3 )
{
terminate_communication();
terminate();
}
}
event remote_connection_handshake_done(p: event_peer)
{
print p;
peer_count = peer_count + 1;
if ( peer_count == 3 )
{
event ready_for_data();
}
}
@endif

View file

@ -1,16 +0,0 @@
# @TEST-EXEC: bro %INPUT
# @TEST-EXEC: btest-diff metrics.log
redef enum Metrics::ID += {
TEST_METRIC,
};
event bro_init() &priority=5
{
Metrics::add_filter(TEST_METRIC,
[$name="foo-bar",
$break_interval=3secs]);
Metrics::add_data(TEST_METRIC, [$host=1.2.3.4], 3);
Metrics::add_data(TEST_METRIC, [$host=6.5.4.3], 2);
Metrics::add_data(TEST_METRIC, [$host=7.2.1.5], 1);
}

View file

@ -1,73 +0,0 @@
# @TEST-SERIALIZE: comm
#
# @TEST-EXEC: btest-bg-run manager-1 BROPATH=$BROPATH:.. CLUSTER_NODE=manager-1 bro %INPUT
# @TEST-EXEC: btest-bg-run proxy-1 BROPATH=$BROPATH:.. CLUSTER_NODE=proxy-1 bro %INPUT
# @TEST-EXEC: sleep 1
# @TEST-EXEC: btest-bg-run worker-1 BROPATH=$BROPATH:.. CLUSTER_NODE=worker-1 bro %INPUT
# @TEST-EXEC: btest-bg-run worker-2 BROPATH=$BROPATH:.. CLUSTER_NODE=worker-2 bro %INPUT
# @TEST-EXEC: btest-bg-wait 20
# @TEST-EXEC: btest-diff manager-1/notice.log
@TEST-START-FILE cluster-layout.bro
redef Cluster::nodes = {
["manager-1"] = [$node_type=Cluster::MANAGER, $ip=127.0.0.1, $p=37757/tcp, $workers=set("worker-1")],
["proxy-1"] = [$node_type=Cluster::PROXY, $ip=127.0.0.1, $p=37758/tcp, $manager="manager-1", $workers=set("worker-1")],
["worker-1"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=37760/tcp, $manager="manager-1", $proxy="proxy-1", $interface="eth0"],
["worker-2"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=37761/tcp, $manager="manager-1", $proxy="proxy-1", $interface="eth1"],
};
@TEST-END-FILE
redef Log::default_rotation_interval = 0secs;
redef enum Notice::Type += {
Test_Notice,
};
redef enum Metrics::ID += {
TEST_METRIC,
};
event bro_init() &priority=5
{
Metrics::add_filter(TEST_METRIC,
[$name="foo-bar",
$break_interval=1hr,
$note=Test_Notice,
$notice_threshold=100,
$log=T]);
}
event remote_connection_closed(p: event_peer)
{
terminate();
}
@if ( Cluster::local_node_type() == Cluster::MANAGER )
event Notice::log_notice(rec: Notice::Info)
{
terminate_communication();
terminate();
}
@endif
@if ( Cluster::local_node_type() == Cluster::WORKER )
event do_metrics(i: count)
{
# Worker-1 will trigger an intermediate update and then if everything
# works correctly, the data from worker-2 will hit the threshold and
# should trigger the notice.
Metrics::add_data(TEST_METRIC, [$host=1.2.3.4], i);
}
event bro_init()
{
if ( Cluster::node == "worker-1" )
schedule 2sec { do_metrics(99) };
if ( Cluster::node == "worker-2" )
event do_metrics(1);
}
@endif

View file

@ -0,0 +1,82 @@
# @TEST-SERIALIZE: comm
#
# @TEST-EXEC: btest-bg-run manager-1 BROPATH=$BROPATH:.. CLUSTER_NODE=manager-1 bro %INPUT
# @TEST-EXEC: sleep 1
# @TEST-EXEC: btest-bg-run worker-1 BROPATH=$BROPATH:.. CLUSTER_NODE=worker-1 bro %INPUT
# @TEST-EXEC: btest-bg-run worker-2 BROPATH=$BROPATH:.. CLUSTER_NODE=worker-2 bro %INPUT
# @TEST-EXEC: btest-bg-wait 15
# @TEST-EXEC: btest-diff manager-1/.stdout
@TEST-START-FILE cluster-layout.bro
redef Cluster::nodes = {
["manager-1"] = [$node_type=Cluster::MANAGER, $ip=127.0.0.1, $p=37757/tcp, $workers=set("worker-1", "worker-2")],
["worker-1"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=37760/tcp, $manager="manager-1", $interface="eth0"],
["worker-2"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=37761/tcp, $manager="manager-1", $interface="eth1"],
};
@TEST-END-FILE
redef Log::default_rotation_interval = 0secs;
global n = 0;
event bro_init() &priority=5
{
local r1: SumStats::Reducer = [$stream="test", $apply=set(SumStats::SUM, SumStats::MIN, SumStats::MAX, SumStats::AVERAGE, SumStats::STD_DEV, SumStats::VARIANCE, SumStats::UNIQUE)];
SumStats::create([$epoch=5secs,
$reducers=set(r1),
$epoch_finished(rt: SumStats::ResultTable) =
{
for ( key in rt )
{
local r = rt[key]["test"];
print fmt("Host: %s - num:%d - sum:%.1f - avg:%.1f - max:%.1f - min:%.1f - var:%.1f - std_dev:%.1f - unique:%d", key$host, r$num, r$sum, r$average, r$max, r$min, r$variance, r$std_dev, r$unique);
}
terminate();
}]);
}
event remote_connection_closed(p: event_peer)
{
terminate();
}
global ready_for_data: event();
redef Cluster::manager2worker_events += /^ready_for_data$/;
event ready_for_data()
{
if ( Cluster::node == "worker-1" )
{
SumStats::observe("test", [$host=1.2.3.4], [$num=34]);
SumStats::observe("test", [$host=1.2.3.4], [$num=30]);
SumStats::observe("test", [$host=6.5.4.3], [$num=1]);
SumStats::observe("test", [$host=7.2.1.5], [$num=54]);
}
if ( Cluster::node == "worker-2" )
{
SumStats::observe("test", [$host=1.2.3.4], [$num=75]);
SumStats::observe("test", [$host=1.2.3.4], [$num=30]);
SumStats::observe("test", [$host=1.2.3.4], [$num=3]);
SumStats::observe("test", [$host=1.2.3.4], [$num=57]);
SumStats::observe("test", [$host=1.2.3.4], [$num=52]);
SumStats::observe("test", [$host=1.2.3.4], [$num=61]);
SumStats::observe("test", [$host=1.2.3.4], [$num=95]);
SumStats::observe("test", [$host=6.5.4.3], [$num=5]);
SumStats::observe("test", [$host=7.2.1.5], [$num=91]);
SumStats::observe("test", [$host=10.10.10.10], [$num=5]);
}
}
@if ( Cluster::local_node_type() == Cluster::MANAGER )
global peer_count = 0;
event remote_connection_handshake_done(p: event_peer) &priority=-5
{
++peer_count;
if ( peer_count == 2 )
event ready_for_data();
}
@endif

View file

@ -0,0 +1,34 @@
# @TEST-EXEC: bro %INPUT
# @TEST-EXEC: btest-diff .stdout
event bro_init() &priority=5
{
local r1: SumStats::Reducer = [$stream="test.metric",
$apply=set(SumStats::SUM,
SumStats::VARIANCE,
SumStats::AVERAGE,
SumStats::MAX,
SumStats::MIN,
SumStats::STD_DEV,
SumStats::UNIQUE)];
SumStats::create([$epoch=3secs,
$reducers=set(r1),
$epoch_finished(data: SumStats::ResultTable) =
{
for ( key in data )
{
local r = data[key]["test.metric"];
print fmt("Host: %s - num:%d - sum:%.1f - var:%.1f - avg:%.1f - max:%.1f - min:%.1f - std_dev:%.1f - unique:%d", key$host, r$num, r$sum, r$variance, r$average, r$max, r$min, r$std_dev, r$unique);
}
}
]);
SumStats::observe("test.metric", [$host=1.2.3.4], [$num=5]);
SumStats::observe("test.metric", [$host=1.2.3.4], [$num=22]);
SumStats::observe("test.metric", [$host=1.2.3.4], [$num=94]);
SumStats::observe("test.metric", [$host=1.2.3.4], [$num=50]);
SumStats::observe("test.metric", [$host=1.2.3.4], [$num=50]);
SumStats::observe("test.metric", [$host=6.5.4.3], [$num=2]);
SumStats::observe("test.metric", [$host=7.2.1.5], [$num=1]);
}

View file

@ -0,0 +1,70 @@
# @TEST-SERIALIZE: comm
#
# @TEST-EXEC: btest-bg-run manager-1 BROPATH=$BROPATH:.. CLUSTER_NODE=manager-1 bro %INPUT
# @TEST-EXEC: sleep 3
# @TEST-EXEC: btest-bg-run worker-1 BROPATH=$BROPATH:.. CLUSTER_NODE=worker-1 bro %INPUT
# @TEST-EXEC: btest-bg-run worker-2 BROPATH=$BROPATH:.. CLUSTER_NODE=worker-2 bro %INPUT
# @TEST-EXEC: btest-bg-wait 20
# @TEST-EXEC: btest-diff manager-1/.stdout
@TEST-START-FILE cluster-layout.bro
redef Cluster::nodes = {
["manager-1"] = [$node_type=Cluster::MANAGER, $ip=127.0.0.1, $p=37757/tcp, $workers=set("worker-1", "worker-2")],
["worker-1"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=37760/tcp, $manager="manager-1", $interface="eth0"],
["worker-2"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=37761/tcp, $manager="manager-1", $interface="eth1"],
};
@TEST-END-FILE
redef Log::default_rotation_interval = 0secs;
event bro_init() &priority=5
{
local r1: SumStats::Reducer = [$stream="test.metric", $apply=set(SumStats::SUM)];
SumStats::create([$epoch=10secs,
$reducers=set(r1),
$epoch_finished(data: SumStats::ResultTable) =
{
print "End of epoch handler was called";
for ( res in data )
print data[res]["test.metric"]$sum;
terminate();
},
$threshold_val(key: SumStats::Key, result: SumStats::Result) =
{
return double_to_count(result["test.metric"]$sum);
},
$threshold=100,
$threshold_crossed(key: SumStats::Key, result: SumStats::Result) =
{
print fmt("A test metric threshold was crossed with a value of: %.1f", result["test.metric"]$sum);
}]);
}
event remote_connection_closed(p: event_peer)
{
terminate();
}
event do_stats(i: count)
{
# Worker-1 will trigger an intermediate update and then, if everything
# works correctly, the data from worker-2 will hit the threshold and
# trigger the threshold_crossed handler.
SumStats::observe("test.metric", [$host=1.2.3.4], [$num=i]);
}
event remote_connection_handshake_done(p: event_peer)
{
if ( p$descr == "manager-1" )
{
if ( Cluster::node == "worker-1" )
{
schedule 0.1sec { do_stats(1) };
schedule 5secs { do_stats(60) };
}
if ( Cluster::node == "worker-2" )
schedule 0.5sec { do_stats(40) };
}
}

View file

@ -0,0 +1,73 @@
# @TEST-EXEC: bro %INPUT
# @TEST-EXEC: btest-diff .stdout
redef enum Notice::Type += {
Test_Notice,
};
event bro_init() &priority=5
{
local r1: SumStats::Reducer = [$stream="test.metric", $apply=set(SumStats::SUM)];
SumStats::create([$epoch=3secs,
$reducers=set(r1),
#$threshold_val = SumStats::sum_threshold("test.metric"),
$threshold_val(key: SumStats::Key, result: SumStats::Result) =
{
return double_to_count(result["test.metric"]$sum);
},
$threshold=5,
$threshold_crossed(key: SumStats::Key, result: SumStats::Result) =
{
local r = result["test.metric"];
print fmt("THRESHOLD: hit a threshold value at %.0f for %s", r$sum, SumStats::key2str(key));
}
]);
local r2: SumStats::Reducer = [$stream="test.metric", $apply=set(SumStats::SUM)];
SumStats::create([$epoch=3secs,
$reducers=set(r2),
#$threshold_val = SumStats::sum_threshold("test.metric"),
$threshold_val(key: SumStats::Key, result: SumStats::Result) =
{
return double_to_count(result["test.metric"]$sum);
},
$threshold_series=vector(3,6,800),
$threshold_crossed(key: SumStats::Key, result: SumStats::Result) =
{
local r = result["test.metric"];
print fmt("THRESHOLD_SERIES: hit a threshold series value at %.0f for %s", r$sum, SumStats::key2str(key));
}
]);
local r3: SumStats::Reducer = [$stream="test.metric", $apply=set(SumStats::SUM)];
local r4: SumStats::Reducer = [$stream="test.metric2", $apply=set(SumStats::SUM)];
SumStats::create([$epoch=3secs,
$reducers=set(r3, r4),
$threshold_val(key: SumStats::Key, result: SumStats::Result) =
{
# Calculate a ratio between sums of two reducers.
if ( "test.metric2" in result && "test.metric" in result &&
result["test.metric"]$sum > 0 )
return double_to_count(result["test.metric2"]$sum / result["test.metric"]$sum);
else
return 0;
},
# Looking for metric2 sum to be 5 times the sum of metric
$threshold=5,
$threshold_crossed(key: SumStats::Key, result: SumStats::Result) =
{
local thold = result["test.metric2"]$sum / result["test.metric"]$sum;
print fmt("THRESHOLD WITH RATIO BETWEEN REDUCERS: hit a threshold value at %.0fx for %s", thold, SumStats::key2str(key));
}
]);
SumStats::observe("test.metric", [$host=1.2.3.4], [$num=3]);
SumStats::observe("test.metric", [$host=6.5.4.3], [$num=2]);
SumStats::observe("test.metric", [$host=7.2.1.5], [$num=1]);
SumStats::observe("test.metric", [$host=1.2.3.4], [$num=3]);
SumStats::observe("test.metric", [$host=7.2.1.5], [$num=1000]);
SumStats::observe("test.metric2", [$host=7.2.1.5], [$num=10]);
SumStats::observe("test.metric2", [$host=7.2.1.5], [$num=1000]);
SumStats::observe("test.metric2", [$host=7.2.1.5], [$num=54321]);
}

View file

@ -0,0 +1,33 @@
# @TEST-EXEC: bro -b %INPUT > output
# @TEST-EXEC: btest-diff output
# This is loaded by default
@load base/utils/queue
event bro_init()
{
local q = Queue::init([$max_len=2]);
Queue::put(q, 1);
Queue::put(q, 2);
Queue::put(q, 3);
Queue::put(q, 4);
local test1: vector of count = vector();
Queue::get_vector(q, test1);
for ( i in test1 )
print fmt("This is a get_vector test: %d", test1[i]);
local test_val = Queue::get(q);
print fmt("Testing get: %s", test_val);
print fmt("Length after get: %d", Queue::len(q));
local q2 = Queue::init([]);
Queue::put(q2, "test 1");
Queue::put(q2, "test 2");
Queue::put(q2, "test 2");
Queue::put(q2, "test 1");
print fmt("Size of q2: %d", Queue::len(q2));
local test3: vector of string = vector();
Queue::get_vector(q2, test3);
for ( i in test3 )
print fmt("String queue value: %s", test3[i]);
}

View file

@ -0,0 +1,23 @@
# @TEST-EXEC: bro %INPUT
# @TEST-EXEC: btest-diff notice.log
@load frameworks/software/vulnerable
redef Software::asset_tracking = ALL_HOSTS;
global java_1_6_vuln: Software::VulnerableVersionRange = [$max=[$major=1,$minor=6,$minor2=0,$minor3=43]];
global java_1_7_vuln: Software::VulnerableVersionRange = [$min=[$major=1,$minor=7], $max=[$major=1,$minor=7,$minor2=0,$minor3=20]];
redef Software::vulnerable_versions += {
["Java"] = set(java_1_6_vuln, java_1_7_vuln)
};
event bro_init()
{
Software::found([$orig_h=1.2.3.4, $orig_p=1234/tcp, $resp_h=4.3.2.1, $resp_p=80/tcp],
[$name="Java", $host=1.2.3.4, $version=[$major=1, $minor=7, $minor2=0, $minor3=15]]);
Software::found([$orig_h=1.2.3.5, $orig_p=1234/tcp, $resp_h=4.3.2.1, $resp_p=80/tcp],
[$name="Java", $host=1.2.3.5, $version=[$major=1, $minor=6, $minor2=0, $minor3=43]]);
Software::found([$orig_h=1.2.3.6, $orig_p=1234/tcp, $resp_h=4.3.2.1, $resp_p=80/tcp],
[$name="Java", $host=1.2.3.6, $version=[$major=1, $minor=6, $minor2=0, $minor3=50]]);
}