Merge remote-tracking branch 'origin/master' into topic/gilbert/ascii-header

Conflicts:
	testing/btest/Baseline/core.print-bpf-filters-ipv4/conn.log
	testing/btest/Baseline/core.vlan-mpls/conn.log
	testing/btest/Baseline/policy.frameworks.logging.remote-types/receiver.test.log
	testing/btest/Baseline/policy.frameworks.logging.types/ssh.log
	testing/btest/btest.cfg
Gilbert Clark <gc355804@ohio.edu> 2011-08-30 11:25:55 -07:00
commit 5b6329577f
276 changed files with 1541 additions and 934 deletions

CHANGES

@@ -1,4 +1,39 @@
1.6-dev-1120 | 2011-08-19 19:00:15 -0700
* Fix for the CompHash fix. (Robin Sommer)
1.6-dev-1118 | 2011-08-18 14:11:55 -0700
* Fixing key size calculation in composite hash code. (Robin Sommer)
1.6-dev-1116 | 2011-08-18 10:05:07 -0700
* Remove the 'net' type from Bro (addresses #535).
* Fix H3 assumption of an 8-bit byte/char. (Jon Siwek)
* Allow reading from interface without additional script arguments.
Explicitly passing in '-' as an additional command line argument
still allows reading a script from stdin. (Jon Siwek)
* SSH bruteforcing detection now done with metrics framework. (Seth
Hall)
* Updates for SQL injection attack detection to match the metrics
framework updates. (Seth Hall)
* Metrics framework now works on cluster setups. (Seth Hall)
* Reclassifying more DNS manager errors as non-fatal errors. (Robin
Sommer)
* Fix ConnSize_Analyzer when used in conjunction with connection
compressor. (Gregor Maier)
* Fix reporter using part of the actual message as a format string.
(Jon Siwek)
1.6-dev-1095 | 2011-08-13 11:59:07 -0700
* A larger number of script documentation updates. Closes #543. (Jon

@@ -1 +1 @@
1.6-dev-1095
1.6-dev-1120

@@ -1 +1 @@
Subproject commit 03e6d398edf422140ba9f50e6fabbec33ee2f3cb
Subproject commit 743f10dda8cd5655ea3dc6eb705ff5414ed4f535

@@ -1 +1 @@
Subproject commit c39622855e3c3a5cc94c7376f86184ed1db1939a
Subproject commit cf4ce9dfc5d6dc4e6d311955eeaec2d679e8669b

@@ -119,7 +119,7 @@ macro(REST_TARGET srcDir broInput)
ARGS -E remove_directory .state
# generate the reST documentation using bro
COMMAND BROPATH=${BROPATH}:${srcDir} ${CMAKE_BINARY_DIR}/src/bro
ARGS --doc-scripts ${broInput} || (rm -rf .state *.log *.rst && exit 1)
ARGS -b -Z ${broInput} || (rm -rf .state *.log *.rst && exit 1)
# move generated doc into a new directory tree that
# defines the final structure of documents
COMMAND "${CMAKE_COMMAND}"

@@ -16,13 +16,13 @@ rest_target(${CMAKE_CURRENT_SOURCE_DIR} example.bro internal)
rest_target(${psd} base/init-default.bro internal)
rest_target(${psd} base/init-bare.bro internal)
rest_target(${CMAKE_BINARY_DIR}/src bro.bif.bro)
rest_target(${CMAKE_BINARY_DIR}/src const.bif.bro)
rest_target(${CMAKE_BINARY_DIR}/src event.bif.bro)
rest_target(${CMAKE_BINARY_DIR}/src logging.bif.bro)
rest_target(${CMAKE_BINARY_DIR}/src reporter.bif.bro)
rest_target(${CMAKE_BINARY_DIR}/src strings.bif.bro)
rest_target(${CMAKE_BINARY_DIR}/src types.bif.bro)
rest_target(${CMAKE_BINARY_DIR}/src/base bro.bif.bro)
rest_target(${CMAKE_BINARY_DIR}/src/base const.bif.bro)
rest_target(${CMAKE_BINARY_DIR}/src/base event.bif.bro)
rest_target(${CMAKE_BINARY_DIR}/src/base logging.bif.bro)
rest_target(${CMAKE_BINARY_DIR}/src/base reporter.bif.bro)
rest_target(${CMAKE_BINARY_DIR}/src/base strings.bif.bro)
rest_target(${CMAKE_BINARY_DIR}/src/base types.bif.bro)
rest_target(${psd} base/frameworks/cluster/main.bro)
rest_target(${psd} base/frameworks/cluster/nodes/manager.bro)
rest_target(${psd} base/frameworks/cluster/nodes/proxy.bro)
@@ -34,7 +34,9 @@ rest_target(${psd} base/frameworks/dpd/main.bro)
rest_target(${psd} base/frameworks/intel/main.bro)
rest_target(${psd} base/frameworks/logging/main.bro)
rest_target(${psd} base/frameworks/logging/writers/ascii.bro)
rest_target(${psd} base/frameworks/metrics/cluster.bro)
rest_target(${psd} base/frameworks/metrics/main.bro)
rest_target(${psd} base/frameworks/metrics/non-cluster.bro)
rest_target(${psd} base/frameworks/notice/actions/add-geodata.bro)
rest_target(${psd} base/frameworks/notice/actions/drop.bro)
rest_target(${psd} base/frameworks/notice/actions/email_admin.bro)
@@ -64,11 +66,11 @@ rest_target(${psd} base/protocols/http/partial-content.bro)
rest_target(${psd} base/protocols/http/utils.bro)
rest_target(${psd} base/protocols/irc/dcc-send.bro)
rest_target(${psd} base/protocols/irc/main.bro)
rest_target(${psd} base/protocols/mime/base.bro)
rest_target(${psd} base/protocols/mime/file-extract.bro)
rest_target(${psd} base/protocols/mime/file-hash.bro)
rest_target(${psd} base/protocols/mime/file-ident.bro)
rest_target(${psd} base/protocols/rpc/base.bro)
rest_target(${psd} base/protocols/mime/main.bro)
rest_target(${psd} base/protocols/rpc/main.bro)
rest_target(${psd} base/protocols/smtp/entities-excerpt.bro)
rest_target(${psd} base/protocols/smtp/entities.bro)
rest_target(${psd} base/protocols/smtp/main.bro)
@@ -99,8 +101,7 @@ rest_target(${psd} policy/frameworks/metrics/http-example.bro)
rest_target(${psd} policy/frameworks/metrics/ssl-example.bro)
rest_target(${psd} policy/frameworks/software/version-changes.bro)
rest_target(${psd} policy/frameworks/software/vulnerable.bro)
rest_target(${psd} policy/integration/barnyard2/base.bro)
rest_target(${psd} policy/integration/barnyard2/event.bro)
rest_target(${psd} policy/integration/barnyard2/main.bro)
rest_target(${psd} policy/integration/barnyard2/types.bro)
rest_target(${psd} policy/misc/analysis-groups.bro)
rest_target(${psd} policy/misc/loaded-scripts.bro)

@@ -68,12 +68,12 @@ sourcedir=${thisdir}/../..
echo "$statictext" > $outfile
bifs=`( cd ${sourcedir}/build/src && find . -name \*\.bro | sort )`
bifs=`( cd ${sourcedir}/src && find . -name \*\.bif | sort )`
for file in $bifs
do
f=${file:2}
echo "rest_target(\${CMAKE_BINARY_DIR}/src $f)" >> $outfile
f=${file:2}.bro
echo "rest_target(\${CMAKE_BINARY_DIR}/src/base $f)" >> $outfile
done
scriptfiles=`( cd ${sourcedir}/scripts && find . -name \*\.bro | sort )`

@@ -1,7 +1,7 @@
# Load the core cluster support.
@load ./main
@if ( Cluster::node != "" )
@if ( Cluster::is_enabled() )
# Give the node being started up its peer name.
redef peer_description = Cluster::node;
@@ -19,22 +19,22 @@ redef peer_description = Cluster::node;
@load ./setup-connections
# Don't start the listening process until we're a bit more sure that the
# Don't load the listening script until we're a bit more sure that the
# cluster framework is actually being enabled.
@load frameworks/communication/listen-clear
## Set the port that this node is supposed to listen on.
redef Communication::listen_port_clear = Cluster::nodes[Cluster::node]$p;
@if ( Cluster::nodes[Cluster::node]$node_type == Cluster::MANAGER )
@if ( Cluster::local_node_type() == Cluster::MANAGER )
@load ./nodes/manager
@endif
@if ( Cluster::nodes[Cluster::node]$node_type == Cluster::PROXY )
@if ( Cluster::local_node_type() == Cluster::PROXY )
@load ./nodes/proxy
@endif
@if ( Cluster::nodes[Cluster::node]$node_type == Cluster::WORKER )
@if ( Cluster::local_node_type() == Cluster::WORKER )
@load ./nodes/worker
@endif

@@ -1,3 +1,4 @@
@load base/frameworks/control
module Cluster;
@@ -9,6 +10,7 @@ export {
} &log;
type NodeType: enum {
NONE,
CONTROL,
MANAGER,
PROXY,
@@ -47,6 +49,25 @@ export {
time_machine: string &optional;
};
## This function can be called at any time to determine if the cluster
## framework is being enabled for this run.
global is_enabled: function(): bool;
## This function can be called at any time to determine what type of
## cluster node the current Bro instance is going to be acting as.
## If :bro:id:`Cluster::is_enabled` returns false, then
## :bro:enum:`Cluster::NONE` is returned.
global local_node_type: function(): NodeType;
## This tracks the number of workers currently connected;
## it's maintained internally by the cluster framework. It's
## primarily intended for use by managers to find out how many workers
## should be responding to requests.
global worker_count: count = 0;
## The cluster layout definition. This should be placed into a file
## named cluster-layout.bro somewhere in the BROPATH. It will be
## automatically loaded if the CLUSTER_NODE environment variable is set.
const nodes: table[string] of Node = {} &redef;
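# A hypothetical minimal cluster-layout.bro illustrating the description
# above; every node name, address, and port here is made up.
#
#   redef Cluster::nodes = {
#       ["manager"]  = [$node_type=Cluster::MANAGER, $ip=10.0.0.1, $p=47761/tcp,
#                       $workers=set("worker-1")],
#       ["proxy-1"]  = [$node_type=Cluster::PROXY,   $ip=10.0.0.2, $p=47762/tcp,
#                       $manager="manager", $workers=set("worker-1")],
#       ["worker-1"] = [$node_type=Cluster::WORKER,  $ip=10.0.0.3, $p=47763/tcp,
#                       $manager="manager", $proxy="proxy-1", $interface="eth0"],
#   };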
## This is usually supplied on the command line for each instance
@@ -54,13 +75,34 @@ export {
const node = getenv("CLUSTER_NODE") &redef;
}
event bro_init()
function is_enabled(): bool
{
return (node != "");
}
function local_node_type(): NodeType
{
return is_enabled() ? nodes[node]$node_type : NONE;
}
event remote_connection_handshake_done(p: event_peer)
{
if ( nodes[p$descr]$node_type == WORKER )
++worker_count;
}
event remote_connection_closed(p: event_peer)
{
if ( nodes[p$descr]$node_type == WORKER )
--worker_count;
}
event bro_init() &priority=5
{
# If a node is given, but it's an unknown name we need to fail.
if ( node != "" && node !in nodes )
{
local msg = "You didn't supply a valid node in the Cluster::nodes configuration.";
event reporter_error(current_time(), msg, "");
Reporter::error(fmt("'%s' is not a valid node in the Cluster::nodes configuration", node));
terminate();
}

@@ -8,6 +8,8 @@
##! This is where the cluster manager sets its specific settings for other
##! frameworks and in the core.
@load base/frameworks/notice
@prefixes += cluster-manager
# Load the script for local site configuration for the manager node.

@@ -1,3 +1,4 @@
@load base/frameworks/notice
@prefixes += cluster-worker

@@ -1,5 +1,7 @@
@load ./main
@load base/frameworks/communication/main
@load base/frameworks/communication
@if ( Cluster::node in Cluster::nodes )
module Cluster;
@@ -60,13 +62,12 @@ event bro_init() &priority=9
$connect=T, $retry=1mins,
$class=node];
}
else if ( me$node_type == WORKER )
{
if ( n$node_type == MANAGER && me$manager == i )
Communication::nodes["manager"] = [$host=nodes[i]$ip, $p=nodes[i]$p,
$connect=T, $retry=1mins,
$class=node];
$class=node, $events=manager_events];
if ( n$node_type == PROXY && me$proxy == i )
Communication::nodes["proxy"] = [$host=nodes[i]$ip, $p=nodes[i]$p,
@@ -80,3 +81,5 @@ event bro_init() &priority=9
}
}
}
@endif

@@ -1,6 +1,8 @@
##! Connect to remote Bro or Broccoli instances to share state and/or transfer
##! events.
@load base/frameworks/packet-filter
module Communication;
export {

@@ -20,6 +20,8 @@
# canary
# friend
@load base/frameworks/notice
module Intel;
export {

@@ -159,7 +159,7 @@ export {
# We keep a script-level copy of all filters so that we can manipulate them.
global filters: table[ID, string] of Filter;
@load logging.bif.bro # Needs Filter and Stream defined.
@load base/logging.bif # Needs Filter and Stream defined.
module Log;

@@ -1 +1,11 @@
@load ./main
# The cluster framework must be loaded first.
@load base/frameworks/cluster
# Load either the cluster support script or the non-cluster support script.
@if ( Cluster::is_enabled() )
@load ./cluster
@else
@load ./non-cluster
@endif

@@ -0,0 +1,262 @@
##! This implements transparent cluster support for the metrics framework.
##! Do not load this file directly. It's only meant to be loaded
##! automatically, and it will be if the cluster framework is enabled.
##! The goal of this script is to make metric calculation completely and
##! transparently automated when running on a cluster.
##!
##! Events defined here are not exported deliberately because they are meant
##! to be an internal implementation detail.
@load base/frameworks/cluster
@load ./main
module Metrics;
export {
## This value allows a user to decide how large the groups of results
## transmitted by workers should be.
const cluster_send_in_groups_of = 50 &redef;
## This is the percent of the full threshold value that needs to be met
## on a single worker for that worker to send the value to its manager in
## order for it to request a global view for that value. There is no
## requirement that the manager requests a global view for the index
## since it may opt not to if it requested a global view for the index
## recently.
const cluster_request_global_view_percent = 0.1 &redef;
}
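# Worked example of the percentage mechanic above (hypothetical numbers):
# with a filter threshold of 100 and cluster_request_global_view_percent at
# 0.1, a worker whose local count for an index reaches 10 scales it to
# 10 / 0.1 = 100. That crosses the full threshold, so the worker sends an
# intermediate update and the manager may then request a global view.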
## This event is sent by the manager in a cluster to initiate the
## collection of metrics values for a filter.
global cluster_filter_request: event(uid: string, id: ID, filter_name: string);
## This event is sent by nodes that are collecting metrics after receiving
## a request for the metric filter from the manager.
global cluster_filter_response: event(uid: string, id: ID, filter_name: string, data: MetricTable, done: bool);
## This event is sent by the manager in a cluster to initiate the
## collection of a single index value from a filter. It's typically
## used to get intermediate updates before the break interval triggers
## to speed detection of a value crossing a threshold.
global cluster_index_request: event(uid: string, id: ID, filter_name: string, index: Index);
## This event is sent by nodes in response to a
## :bro:id:`cluster_index_request` event.
global cluster_index_response: event(uid: string, id: ID, filter_name: string, index: Index, val: count);
## This is sent by workers to indicate that they crossed the percentage
## of the current threshold defined globally in
## :bro:id:`cluster_request_global_view_percent`.
global cluster_index_intermediate_response: event(id: Metrics::ID, filter_name: string, index: Metrics::Index, val: count);
## This event is scheduled internally on workers to send result chunks.
global send_data: event(uid: string, id: ID, filter_name: string, data: MetricTable);
# This is maintained by managers so they can know what data they requested and
# when they requested it.
global requested_results: table[string] of time = table() &create_expire=5mins;
# TODO: The next 4 variables make the assumption that a value never
# takes longer than 5 minutes to transmit from workers to manager. This needs to
# be tunable or self-tuning. These should also be restructured to be
# maintained within a single variable.
# This variable is maintained by manager nodes as they collect and aggregate
# results.
global filter_results: table[string, ID, string] of MetricTable &create_expire=5mins;
# This variable is maintained by manager nodes to track how many "dones" they
# collected per collection unique id. Once the number of results for a uid
# matches the number of peer nodes that results should be coming from, the
# result is written out and deleted from here.
# TODO: add an &expire_func in case not all results are received.
global done_with: table[string] of count &create_expire=5mins &default=0;
# This variable is maintained by managers to track intermediate responses as
# they are getting a global view for a certain index.
global index_requests: table[string, ID, string, Index] of count &create_expire=5mins &default=0;
# This variable is maintained by all hosts for different purposes. Non-managers
# maintain it to know what indexes they have recently sent as intermediate
# updates so they don't overwhelm their manager. Managers maintain it so they
# don't overwhelm workers with intermediate index requests. The count that is
# yielded is the number of times the percentage threshold has been crossed and
# an intermediate result has been received. The manager may optionally request
# the index again before data expires from here if too many workers are crossing
# the percentage threshold (not implemented yet!).
global recent_global_view_indexes: table[ID, string, Index] of count &create_expire=5mins &default=0;
# Add events to the cluster framework to make this work.
redef Cluster::manager_events += /Metrics::cluster_(filter_request|index_request)/;
redef Cluster::worker_events += /Metrics::cluster_(filter_response|index_response|index_intermediate_response)/;
@if ( Cluster::local_node_type() != Cluster::MANAGER )
# This is done on all non-manager node types in the event that a metric is
# being collected somewhere other than a worker.
function data_added(filter: Filter, index: Index, val: count)
{
# If an intermediate update for this value was sent recently, don't send
# it again.
if ( [filter$id, filter$name, index] in recent_global_view_indexes )
return;
# If val is 5 and global view % is 0.1 (10%), pct_val will be 50. If that
# crosses the full threshold then it's a candidate to send as an
# intermediate update.
local pct_val = double_to_count(val / cluster_request_global_view_percent);
if ( check_notice(filter, index, pct_val) )
{
# kick off intermediate update
event Metrics::cluster_index_intermediate_response(filter$id, filter$name, index, val);
++recent_global_view_indexes[filter$id, filter$name, index];
}
}
event Metrics::send_data(uid: string, id: ID, filter_name: string, data: MetricTable)
{
#print fmt("WORKER %s: sending data for uid %s...", Cluster::node, uid);
local local_data: MetricTable;
local num_added = 0;
for ( index in data )
{
local_data[index] = data[index];
delete data[index];
# Only send cluster_send_in_groups_of at a time. Queue another
# event to send the next group.
if ( cluster_send_in_groups_of == ++num_added )
break;
}
local done = F;
# If data is empty, this metric is done.
if ( |data| == 0 )
done = T;
event Metrics::cluster_filter_response(uid, id, filter_name, local_data, done);
if ( ! done )
event Metrics::send_data(uid, id, filter_name, data);
}
event Metrics::cluster_filter_request(uid: string, id: ID, filter_name: string)
{
#print fmt("WORKER %s: received the cluster_filter_request event.", Cluster::node);
# Initiate sending all of the data for the requested filter.
event Metrics::send_data(uid, id, filter_name, store[id, filter_name]);
# Look up the actual filter and reset it; the reference to the data
# currently stored will be maintained internally by the send_data event.
reset(filter_store[id, filter_name]);
}
event Metrics::cluster_index_request(uid: string, id: ID, filter_name: string, index: Index)
{
local val=0;
if ( index in store[id, filter_name] )
val = store[id, filter_name][index];
# fmt("WORKER %s: received the cluster_index_request event for %s=%d.", Cluster::node, index2str(index), val);
event Metrics::cluster_index_response(uid, id, filter_name, index, val);
}
@endif
@if ( Cluster::local_node_type() == Cluster::MANAGER )
# Managers handle logging.
event Metrics::log_it(filter: Filter)
{
#print fmt("%.6f MANAGER: breaking %s filter for %s metric", network_time(), filter$name, filter$id);
local uid = unique_id("");
# Set some tracking variables.
requested_results[uid] = network_time();
filter_results[uid, filter$id, filter$name] = table();
# Request data from peers.
event Metrics::cluster_filter_request(uid, filter$id, filter$name);
# Schedule the log_it event for the next break period.
schedule filter$break_interval { Metrics::log_it(filter) };
}
# This is unlikely to be called often, but it's here in case there are metrics
# being collected by managers.
function data_added(filter: Filter, index: Index, val: count)
{
if ( check_notice(filter, index, val) )
do_notice(filter, index, val);
}
event Metrics::cluster_index_response(uid: string, id: ID, filter_name: string, index: Index, val: count)
{
#print fmt("%0.6f MANAGER: receiving index data from %s", network_time(), get_event_peer()$descr);
if ( [uid, id, filter_name, index] !in index_requests )
index_requests[uid, id, filter_name, index] = 0;
index_requests[uid, id, filter_name, index] += val;
local ir = index_requests[uid, id, filter_name, index];
++done_with[uid];
if ( Cluster::worker_count == done_with[uid] )
{
if ( check_notice(filter_store[id, filter_name], index, ir) )
do_notice(filter_store[id, filter_name], index, ir);
delete done_with[uid];
delete index_requests[uid, id, filter_name, index];
}
}
# Managers handle intermediate updates here.
event Metrics::cluster_index_intermediate_response(id: ID, filter_name: string, index: Index, val: count)
{
#print fmt("MANAGER: receiving intermediate index data from %s", get_event_peer()$descr);
#print fmt("MANAGER: requesting index data for %s", index2str(index));
local uid = unique_id("");
event Metrics::cluster_index_request(uid, id, filter_name, index);
++recent_global_view_indexes[id, filter_name, index];
}
event Metrics::cluster_filter_response(uid: string, id: ID, filter_name: string, data: MetricTable, done: bool)
{
#print fmt("MANAGER: receiving results from %s", get_event_peer()$descr);
local local_data = filter_results[uid, id, filter_name];
for ( index in data )
{
if ( index !in local_data )
local_data[index] = 0;
local_data[index] += data[index];
}
# Mark another worker as being "done" for this uid.
if ( done )
++done_with[uid];
# If the data has been collected from all peers, we are done and ready to log.
if ( Cluster::worker_count == done_with[uid] )
{
local ts = network_time();
# Log the time this was initially requested if it's available.
if ( uid in requested_results )
{
ts = requested_results[uid];
delete requested_results[uid];
}
write_log(ts, filter_store[id, filter_name], local_data);
# Clean up
delete filter_results[uid, id, filter_name];
delete done_with[uid];
}
}
@endif

@@ -1,5 +1,7 @@
##! This is the implementation of the metrics framework.
@load base/frameworks/notice
module Metrics;
export {
@@ -13,16 +15,11 @@ export {
## current value to the logging stream.
const default_break_interval = 15mins &redef;
type Info: record {
ts: time &log;
metric_id: ID &log;
filter_name: string &log;
agg_subnet: string &log &optional;
index: string &log &optional;
value: count &log;
};
## This is the interval for how often notices will be raised after they
## have already fired.
const renotice_interval = 1hr &redef;
type Entry: record {
type Index: record {
## Host is the value to which this metric applies.
host: addr &optional;
@@ -33,11 +30,19 @@ export {
## value in a Host header. This is an example of a non-host based
## metric since multiple IP addresses could respond for the same Host
## header value.
index: string &default="";
str: string &optional;
## The value by which the counter should be increased in each filter
## where this entry is accepted.
increment: count &default=1;
## The CIDR block that this metric applies to. This is typically
## only used internally for host based aggregation.
network: subnet &optional;
} &log;
type Info: record {
ts: time &log;
metric_id: ID &log;
filter_name: string &log;
index: Index &log;
value: count &log;
};
# TODO: configure a metrics filter logging stream to log the current
@@ -52,104 +57,108 @@ export {
name: string &default="default";
## A predicate so that you can decide per index if you would like
## to accept the data being inserted.
pred: function(entry: Entry): bool &optional;
pred: function(index: Index): bool &optional;
## Global mask by which you'd like to aggregate traffic.
aggregation_mask: count &optional;
## This is essentially applying names to various subnets.
aggregation_table: table[subnet] of string &optional;
## This is essentially a mapping table between addresses and subnets.
aggregation_table: table[subnet] of subnet &optional;
## The interval at which the metric should be "broken" and written
## to the logging stream.
## to the logging stream. The counters are also reset to zero at
## this time so any threshold based detection needs to be set to a
## number that should be expected to happen within this period.
break_interval: interval &default=default_break_interval;
## This determines if the result of this filter is sent to the metrics
## logging stream. One use for the metrics framework is as an internal
## thresholding and statistics gathering utility that is meant to
## never log but rather to generate notices and derive data.
log: bool &default=T;
## If this and a $notice_threshold value are set, this notice type
## will be generated by the metrics framework.
note: Notice::Type &optional;
## A straight threshold for generating a notice.
notice_threshold: count &optional;
## A series of thresholds at which to generate notices.
notice_thresholds: vector of count &optional;
## If this and a $notice_threshold value are set, this notice type
## will be generated by the metrics framework.
note: Notice::Type &optional;
## How often this notice should be raised for this metric index. It
## will be generated every time it crosses a threshold, but if the
## $break_interval is set to 5mins and this is set to 1hr the notice
## will only be generated once per hour even if something crosses the
## threshold in every break interval.
notice_freq: interval &optional;
};
global add_filter: function(id: ID, filter: Filter);
global add_data: function(id: ID, entry: Entry);
global add_data: function(id: ID, index: Index, increment: count);
global index2str: function(index: Index): string;
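# A minimal usage sketch for add_filter/add_data, patterned on the
# conn-example script elsewhere in this commit (CONNS_ORIGINATED is assumed
# to have been added to Metrics::ID via a redef enum):
#
#   event bro_init()
#       {
#       Metrics::add_filter(CONNS_ORIGINATED,
#                           [$aggregation_mask=24, $break_interval=5mins]);
#       }
#
#   event connection_established(c: connection)
#       {
#       Metrics::add_data(CONNS_ORIGINATED, [$host=c$id$orig_h], 1);
#       }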
# This is the event that is used to "finish" metrics and adapt the metrics
# framework for clustered or non-clustered usage.
global log_it: event(filter: Filter);
global log_metrics: event(rec: Info);
}
global metric_filters: table[ID] of vector of Filter = table();
redef record Notice::Info += {
metric_index: Index &log &optional;
};
type MetricIndex: table[string] of count &default=0;
type MetricTable: table[string] of MetricIndex;
global metric_filters: table[ID] of vector of Filter = table();
global filter_store: table[ID, string] of Filter = table();
type MetricTable: table[Index] of count &default=0;
# This is indexed by metric ID and stream filter name.
global store: table[ID, string] of MetricTable = table();
global store: table[ID, string] of MetricTable = table() &default=table();
# This function checks if a threshold has been crossed and generates a
# notice if it has. It is also used as a method to implement
# mid-break-interval threshold crossing detection for cluster deployments.
global check_notice: function(filter: Filter, index: Index, val: count): bool;
# This is hook for watching thresholds being crossed. It is called whenever
# index values are updated and the new val is given as the `val` argument.
global data_added: function(filter: Filter, index: Index, val: count);
# This stores the current threshold index for filters using the
# $notice_thresholds element.
global thresholds: table[string] of count = {} &default=0;
# $notice_threshold and $notice_thresholds elements.
global thresholds: table[ID, string, Index] of count = {} &create_expire=renotice_interval &default=0;
event bro_init() &priority=5
{
Log::create_stream(METRICS, [$columns=Info, $ev=log_metrics]);
}
function reset(filter: Filter)
function index2str(index: Index): string
{
store[filter$id, filter$name] = table();
local out = "";
if ( index?$host )
out = fmt("%shost=%s", out, index$host);
if ( index?$network )
out = fmt("%s%snetwork=%s", out, |out|==0 ? "" : ", ", index$network);
if ( index?$str )
out = fmt("%s%sstr=%s", out, |out|==0 ? "" : ", ", index$str);
return fmt("metric_index(%s)", out);
}
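# For instance (hypothetical values), index2str([$host=1.2.3.4, $str="foo"])
# would return "metric_index(host=1.2.3.4, str=foo)".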
event log_it(filter: Filter)
function write_log(ts: time, filter: Filter, data: MetricTable)
{
# If this node is the manager in a cluster, this needs to request values
# for this metric from all of the workers.
local id = filter$id;
local name = filter$name;
for ( agg_subnet in store[id, name] )
for ( index in data )
{
local metric_values = store[id, name][agg_subnet];
for ( index in metric_values )
{
local val = metric_values[index];
local m: Info = [$ts=network_time(),
$metric_id=id,
$filter_name=name,
$agg_subnet=fmt("%s", agg_subnet),
local val = data[index];
local m: Info = [$ts=ts,
$metric_id=filter$id,
$filter_name=filter$name,
$index=index,
$value=val];
if ( filter?$notice_threshold &&
m$value >= filter$notice_threshold )
{
print m;
NOTICE([$note=filter$note,
$msg=fmt("Metrics threshold crossed by %s %d/%d", m$agg_subnet, m$value, filter$notice_threshold),
$n=m$value]);
}
else if ( filter?$notice_thresholds &&
m$value >= filter$notice_thresholds[thresholds[cat(id,name)]] )
{
# TODO: implement this
}
# If there wasn't an index, remove the field.
if ( index == "" )
delete m$index;
# If there wasn't an aggregation subnet, remove the field.
if ( agg_subnet == "" )
delete m$agg_subnet;
if ( filter$log )
Log::write(METRICS, m);
}
}
reset(filter);
schedule filter$break_interval { log_it(filter) };
function reset(filter: Filter)
{
store[filter$id, filter$name] = table();
}
function add_filter(id: ID, filter: Filter)
@@ -177,57 +186,85 @@ function add_filter(id: ID, filter: Filter)
metric_filters[id] = vector();
metric_filters[id][|metric_filters[id]|] = filter;
filter_store[id, filter$name] = filter;
store[id, filter$name] = table();
# Only do this on the manager if in a cluster.
schedule filter$break_interval { log_it(filter) };
schedule filter$break_interval { Metrics::log_it(filter) };
}
function add_data(id: ID, entry: Entry)
function add_data(id: ID, index: Index, increment: count)
{
if ( id !in metric_filters )
return;
local filters = metric_filters[id];
# Add the data to any of the defined filters.
# Try to add the data to all of the defined filters for the metric.
for ( filter_id in filters )
{
local filter = filters[filter_id];
# If this filter has a predicate, run the predicate and skip this
# entry if the predicate return false.
if ( filter?$pred &&
! filter$pred(entry) )
# index if the predicate returns false.
if ( filter?$pred && ! filter$pred(index) )
next;
local agg_subnet = "";
local filt_store = store[id, filter$name];
if ( entry?$host )
if ( index?$host )
{
if ( filter?$aggregation_mask )
{
local agg_mask = filter$aggregation_mask;
agg_subnet = fmt("%s", mask_addr(entry$host, agg_mask));
index$network = mask_addr(index$host, filter$aggregation_mask);
delete index$host;
}
else if ( filter?$aggregation_table )
{
agg_subnet = fmt("%s", filter$aggregation_table[entry$host]);
# if an aggregation table is being used and the value isn't
# in the table, that means we aren't interested in it.
if ( agg_subnet == "" )
next;
index$network = filter$aggregation_table[index$host];
delete index$host;
}
}
local metric_tbl = store[id, filter$name];
if ( index !in metric_tbl )
metric_tbl[index] = 0;
metric_tbl[index] += increment;
data_added(filter, index, metric_tbl[index]);
}
}
function check_notice(filter: Filter, index: Index, val: count): bool
{
if ( (filter?$notice_threshold &&
[filter$id, filter$name, index] !in thresholds &&
val >= filter$notice_threshold) ||
(filter?$notice_thresholds &&
|filter$notice_thresholds| <= thresholds[filter$id, filter$name, index] &&
val >= filter$notice_thresholds[thresholds[filter$id, filter$name, index]]) )
return T;
else
agg_subnet = fmt("%s", entry$host);
return F;
}
if ( agg_subnet !in filt_store )
filt_store[agg_subnet] = table();
function do_notice(filter: Filter, index: Index, val: count)
{
# We include $peer_descr here because a manager could have actually
# generated the notice even though the current remote peer for the event
# calling this could be a worker if this is running as a cluster.
local n: Notice::Info = [$note=filter$note,
$n=val,
$metric_index=index,
$peer_descr=peer_description];
n$msg = fmt("Threshold crossed by %s %d/%d", index2str(index), val, filter$notice_threshold);
if ( index?$str )
n$sub = index$str;
if ( index?$host )
n$src = index$host;
# TODO: not sure where to put the network yet.
local fs = filt_store[agg_subnet];
if ( entry$index !in fs )
fs[entry$index] = 0;
fs[entry$index] = fs[entry$index] + entry$increment;
}
NOTICE(n);
# This just needs to be set to some value so that it doesn't refire the
# notice until it expires from the table or it crosses the next
# threshold in the case of vectors of thresholds.
++thresholds[filter$id, filter$name, index];
}

@@ -0,0 +1,21 @@
@load ./main
module Metrics;
event Metrics::log_it(filter: Filter)
{
local id = filter$id;
local name = filter$name;
write_log(network_time(), filter, store[id, name]);
reset(filter);
schedule filter$break_interval { Metrics::log_it(filter) };
}
function data_added(filter: Filter, index: Index, val: count)
{
if ( check_notice(filter, index, val) )
do_notice(filter, index, val);
}

@@ -4,6 +4,10 @@
##! probably a safe assumption to make in most cases. If both addresses
##! are remote, it will use the $src address.
@load ../main
@load base/frameworks/notice
@load base/utils/site
module Notice;
export {

@@ -1,6 +1,8 @@
##! This script extends the built in notice code to implement the IP address
##! dropping functionality.
@load ../main
module Notice;
export {

@@ -1,3 +1,6 @@
@load ../main
@load base/utils/site
module Notice;
export {

@@ -1,3 +1,4 @@
@load ../main
module Notice;

@@ -1,3 +1,4 @@
@load ../main
module Notice;

@@ -308,7 +308,9 @@ function apply_policy(n: Notice::Info)
if ( ! n?$src_peer )
n$src_peer = get_event_peer();
n$peer_descr = n$src_peer?$descr ? n$src_peer$descr : fmt("%s", n$src_peer$host);
if ( ! n?$peer_descr )
n$peer_descr = n$src_peer?$descr ?
n$src_peer$descr : fmt("%s", n$src_peer$host);
if ( ! n?$actions )
n$actions = set();
@@ -340,7 +342,7 @@ function apply_policy(n: Notice::Info)
# Create the ordered notice policy automatically which will be used at runtime
# for prioritized matching of the notice policy.
event bro_init()
event bro_init() &priority=10
{
local tmp: table[count] of set[PolicyItem] = table();
for ( pi in policy )

@@ -1,3 +1,7 @@
@load base/utils/conn-ids
@load base/utils/site
@load ./main
module Weird;
export {

@@ -4,6 +4,8 @@
##! open filter and all filters defined in Bro scripts with the
##! :bro:id:`capture_filters` and :bro:id:`restrict_filters` variables.
@load base/frameworks/notice
module PacketFilter;
export {

@@ -1,5 +1,7 @@
##! This script reports on packet loss from the various packet sources.
@load base/frameworks/notice
module PacketFilter;
export {

@@ -1,5 +1,7 @@
##! Script level signature support.
@load base/frameworks/notice
module Signatures;
export {

@@ -4,6 +4,9 @@
##! that they analyze. The entry point for providing new software detections
##! to this framework is through the :bro:id:`Software::found` function.
@load base/utils/directions-and-hosts
@load base/utils/numbers
module Software;
export {

File diff suppressed because it is too large.

@@ -23,11 +23,11 @@
@load base/frameworks/signatures
@load base/frameworks/packet-filter
@load base/frameworks/software
@load base/frameworks/intel
@load base/frameworks/metrics
@load base/frameworks/communication
@load base/frameworks/control
@load base/frameworks/cluster
@load base/frameworks/metrics
@load base/frameworks/intel
@load base/frameworks/reporter
@load base/protocols/conn

@@ -8,6 +8,8 @@
##! This script does not work well in a cluster context unless it has a
##! remotely mounted disk to write the content files to.
@load base/utils/files
module Conn;
export {

@@ -1,3 +1,4 @@
@load base/utils/site
module Conn;
@@ -12,7 +13,11 @@ export {
proto: transport_proto &log;
service: string &log &optional;
duration: interval &log &optional;
## The number of payload bytes the originator sent. For TCP
## this is taken from sequence numbers and might be inaccurate
## (e.g., due to large connections)
orig_bytes: count &log &optional;
## The number of payload bytes the responder sent. See ``orig_bytes``.
resp_bytes: count &log &optional;
## ========== ===============================================
@@ -68,6 +73,17 @@ export {
## for instance. I.e., we just record that data went in that direction.
## This history is not meant to encode how much data that happened to be.
history: string &log &optional;
## Number of packets the originator sent.
## Only set if :bro:id:`use_conn_size_analyzer`=T
orig_pkts: count &log &optional;
## Number of IP level bytes the originator sent (as seen on the wire,
## taken from the IP total_length header field).
## Only set if :bro:id:`use_conn_size_analyzer`=T
orig_ip_bytes: count &log &optional;
## Number of packets the responder sent. See ``orig_pkts``.
resp_pkts: count &log &optional;
## Number of IP level bytes the responder sent. See ``orig_ip_bytes``.
resp_ip_bytes: count &log &optional;
};
global log_conn: event(rec: Info);
@@ -143,31 +159,39 @@ function determine_service(c: connection): string
return to_lower(service);
}
## Fill out the c$conn record for logging
function set_conn(c: connection, eoc: bool)
{
if ( ! c?$conn )
{
local id = c$id;
local tmp: Info;
tmp$ts=c$start_time;
tmp$uid=c$uid;
tmp$id=id;
tmp$proto=get_port_transport_proto(id$resp_p);
if( |Site::local_nets| > 0 )
tmp$local_orig=Site::is_local_addr(id$orig_h);
c$conn = tmp;
}
c$conn$ts=c$start_time;
c$conn$uid=c$uid;
c$conn$id=c$id;
c$conn$proto=get_port_transport_proto(c$id$resp_p);
if( |Site::local_nets| > 0 )
c$conn$local_orig=Site::is_local_addr(c$id$orig_h);
if ( eoc )
{
if ( c$duration > 0secs )
{
c$conn$duration=c$duration;
# TODO: these should optionally use Gregor's new
# actual byte counting code if it's enabled.
c$conn$orig_bytes=c$orig$size;
c$conn$resp_bytes=c$resp$size;
}
if ( c$orig?$num_pkts )
{
# these are set if use_conn_size_analyzer=T
# we can have counts in here even without duration>0
c$conn$orig_pkts = c$orig$num_pkts;
c$conn$orig_ip_bytes = c$orig$num_bytes_ip;
c$conn$resp_pkts = c$resp$num_pkts;
c$conn$resp_ip_bytes = c$resp$num_bytes_ip;
}
local service = determine_service(c);
if ( service != "" )
c$conn$service=service;
@@ -178,11 +202,6 @@ function set_conn(c: connection, eoc: bool)
}
}
event connection_established(c: connection) &priority=5
{
set_conn(c, F);
}
event content_gap(c: connection, is_orig: bool, seq: count, length: count) &priority=5
{
set_conn(c, F);
@@ -190,9 +209,13 @@ event content_gap(c: connection, is_orig: bool, seq: count, length: count) &prio
c$conn$missed_bytes = c$conn$missed_bytes + length;
}
event connection_state_remove(c: connection) &priority=-5
event connection_state_remove(c: connection) &priority=5
{
set_conn(c, T);
}
event connection_state_remove(c: connection) &priority=-5
{
Log::write(CONN, c$conn);
}

@@ -1,3 +1,4 @@
@load ./consts
module DNS;

@@ -1,5 +1,8 @@
##! File extraction for FTP.
@load ./main
@load base/utils/files
module FTP;
export {

@@ -7,6 +7,10 @@
##!
##! * Handle encrypted sessions correctly (get an example?)
@load ./utils-commands
@load base/utils/paths
@load base/utils/numbers
module FTP;
export {

@@ -1,6 +1,10 @@
##! Extracts the items from HTTP traffic, one per file. At this time only
##! the message body from the server can be extracted with this script.
@load ./main
@load ./file-ident
@load base/utils/files
module HTTP;
export {

@@ -1,5 +1,7 @@
##! Calculate hashes for HTTP body transfers.
@load ./file-ident
module HTTP;
export {

@@ -1,6 +1,11 @@
##! This script is involved in the identification of file types in HTTP
##! response bodies.
@load base/frameworks/signatures
@load base/frameworks/notice
@load ./main
@load ./utils
# Add the magic number signatures to the core signature set.
redef signature_files += "base/protocols/http/file-ident.sig";
# Ignore the signatures used to match files

@@ -1,3 +1,5 @@
@load base/utils/numbers
@load base/utils/files
module HTTP;

@@ -3,6 +3,10 @@
##!
##! This script doesn't work yet and isn't loaded by default.
@load base/frameworks/notice
@load ./main
@load ./utils
module HTTP;
export {

@@ -1,5 +1,7 @@
##! Utilities specific for HTTP processing.
@load ./main
module HTTP;
export {

@@ -8,6 +8,9 @@
##! Example line from IRC server indicating that the DCC SEND is about to start:
##! PRIVMSG my_nick :^ADCC SEND whateverfile.zip 3640061780 1026 41709^A
@load ./main
@load base/utils/files
module IRC;
export {

@@ -1,4 +1,4 @@
@load protocols/mime/base
@load protocols/mime/file-ident
@load protocols/mime/file-extract
@load protocols/mime/file-hash
@load ./main
@load ./file-ident
@load ./file-extract
@load ./file-hash

@@ -1,4 +1,5 @@
@load ./file-ident
@load base/frameworks/notice
module MIME;

@@ -1,4 +1,4 @@
@load ./base
@load ./main
module MIME;

@@ -8,6 +8,8 @@
# programs for which we don't have an analyzer.
#
@load base/utils/conn-ids
module RPC;
export {

@@ -1,3 +1,6 @@
@load base/frameworks/notice
@load base/utils/addrs
@load base/utils/directions-and-hosts
module SMTP;

@@ -5,11 +5,23 @@
##! Requires that :bro:id:`use_conn_size_analyzer` is set to T! The heuristic
##! is not attempted if the connection size analyzer isn't enabled.
@load base/frameworks/notice
@load base/utils/site
@load base/utils/thresholds
@load base/utils/conn-ids
@load base/utils/directions-and-hosts
module SSH;
export {
redef enum Log::ID += { SSH };
redef enum Notice::Type += {
## This indicates that a heuristically detected "successful" SSH
## authentication occurred.
Login
};
type Info: record {
ts: time &log;
uid: string &log;
@@ -88,6 +100,11 @@ function check_ssh_connection(c: connection, done: bool)
if ( c$ssh$done )
return;
# Make sure conn_size_analyzer is active by checking
# resp$num_bytes_ip
if ( !c$resp?$num_bytes_ip )
return;
# If this is still a live connection and the byte count has not
# crossed the threshold, just return and let the rescheduled check happen later.
if ( !done && c$resp$num_bytes_ip < authentication_data_size )
@@ -128,6 +145,10 @@ function check_ssh_connection(c: connection, done: bool)
event SSH::heuristic_successful_login(c: connection) &priority=-5
{
NOTICE([$note=Login,
$msg="Heuristically detected successful SSH login.",
$conn=c]);
Log::write(SSH, c$ssh);
}
event SSH::heuristic_failed_login(c: connection) &priority=-5

@@ -1,3 +1,5 @@
@load ./consts
@load base/frameworks/notice
module SSL;

@@ -1,6 +1,6 @@
# Don't edit! This file is automatically generated.
# Generated at: Wed Jun 29 07:52:38 -0400 2011
@load base/protocols/ssl
module SSL;
redef root_certs += {
["GTE CyberTrust Global Root"] = "\x30\x82\x02\x5A\x30\x82\x01\xC3\x02\x02\x01\xA5\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x04\x05\x00\x30\x75\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x55\x53\x31\x18\x30\x16\x06\x03\x55\x04\x0A\x13\x0F\x47\x54\x45\x20\x43\x6F\x72\x70\x6F\x72\x61\x74\x69\x6F\x6E\x31\x27\x30\x25\x06\x03\x55\x04\x0B\x13\x1E\x47\x54\x45\x20\x43\x79\x62\x65\x72\x54\x72\x75\x73\x74\x20\x53\x6F\x6C\x75\x74\x69\x6F\x6E\x73\x2C\x20\x49\x6E\x63\x2E\x31\x23\x30\x21\x06\x03\x55\x04\x03\x13\x1A\x47\x54\x45\x20\x43\x79\x62\x65\x72\x54\x72\x75\x73\x74\x20\x47\x6C\x6F\x62\x61\x6C\x20\x52\x6F\x6F\x74\x30\x1E\x17\x0D\x39\x38\x30\x38\x31\x33\x30\x30\x32\x39\x30\x30\x5A\x17\x0D\x31\x38\x30\x38\x31\x33\x32\x33\x35\x39\x30\x30\x5A\x30\x75\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x55\x53\x31\x18\x30\x16\x06\x03\x55\x04\x0A\x13\x0F\x47\x54\x45\x20\x43\x6F\x72\x70\x6F\x72\x61\x74\x69\x6F\x6E\x31\x27\x30\x25\x06\x03\x55\x04\x0B\x13\x1E\x47\x54\x45\x20\x43\x79\x62\x65\x72\x54\x72\x75\x73\x74\x20\x53\x6F\x6C\x75\x74\x69\x6F\x6E\x73\x2C\x20\x49\x6E\x63\x2E\x31\x23\x30\x21\x06\x03\x55\x04\x03\x13\x1A\x47\x54\x45\x20\x43\x79\x62\x65\x72\x54\x72\x75\x73\x74\x20\x47\x6C\x6F\x62\x61\x6C\x20\x52\x6F\x6F\x74\x30\x81\x9F\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x01\x05\x00\x03\x81\x8D\x00\x30\x81\x89\x02\x81\x81\x00\x95\x0F\xA0\xB6\xF0\x50\x9C\xE8\x7A\xC7\x88\xCD\xDD\x17\x0E\x2E\xB0\x94\xD0\x1B\x3D\x0E\xF6\x94\xC0\x8A\x94\xC7\x06\xC8\x90\x97\xC8\xB8\x64\x1A\x7A\x7E\x6C\x3C\x53\xE1\x37\x28\x73\x60\x7F\xB2\x97\x53\x07\x9F\x53\xF9\x6D\x58\x94\xD2\xAF\x8D\x6D\x88\x67\x80\xE6\xED\xB2\x95\xCF\x72\x31\xCA\xA5\x1C\x72\xBA\x5C\x02\xE7\x64\x42\xE7\xF9\xA9\x2C\xD6\x3A\x0D\xAC\x8D\x42\xAA\x24\x01\x39\xE6\x9C\x3F\x01\x85\x57\x0D\x58\x87\x45\xF8\xD3\x85\xAA\x93\x69\x26\x85\x70\x48\x80\x3F\x12\x15\xC7\x79\xB4\x1F\x05\x2F\x3B\x62\x99\x02\x03\x01\x00\x01\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x04\x05\x00\x03\x81\x81\x00\x6D\xEB\x1B\x09\xE9\x5E\xD9\x51\xDB\x67\x22\x61\xA4\x2A\x3C\x48\x77\xE3\xA0\x7C\xA6\xDE\x73\xA2\x14\x03\x85\x3D\xFB\xAB\x0E\x30\xC5\x83\x16\x33\x81\x13\x08\x9E\x7B\x34\x4E\xDF\x40\xC8\x74\xD7\xB9\x7D\xDC\xF4\x76\x55\x7D\x9B\x63\x54\x18\xE9\xF0\xEA\xF3\x5C\xB1\xD9\x8B\x42\x1E\xB9\xC0\x95\x4E\xBA\xFA\xD5\xE2\x7C\xF5\x68\x61\xBF\x8E\xEC\x05\x97\x5F\x5B\xB0\xD7\xA3\x85\x34\xC4\x24\xA7\x0D\x0F\x95\x93\xEF\xCB\x94\xD8\x9E\x1F\x9D\x5C\x85\x6D\xC7\xAA\xAE\x4F\x1F\x22\xB5\xCD\x95\xAD\xBA\xA7\xCC\xF9\xAB\x0B\x7A\x7F",

@@ -1,3 +1,4 @@
@load ./site
type Direction: enum {
## The connection originator is not within the locally-monitored network,

@@ -17,9 +17,11 @@ export {
## Networks that are considered "local".
const local_nets: set[subnet] &redef;
## This is used for mapping between local networks and string
## values for the CIDRs represented.
global local_nets_table: table[subnet] of string = {};
## This is used for retrieving the subnet when you have multiple
## :bro:id:`local_nets`. A membership query can be done with an
## :bro:type:`addr` and the table will yield the subnet it was found
## within.
global local_nets_table: table[subnet] of subnet = {};
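# A hypothetical illustration of the lookup described above, assuming
# 10.0.0.0/8 has been redef'd into Site::local_nets:
#
#   local n = Site::local_nets_table[10.1.2.3];   # yields 10.0.0.0/8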
## Networks that are considered "neighbors".
const neighbor_nets: set[subnet] &redef;
@@ -145,6 +147,6 @@ event bro_init() &priority=10
# Create the local_nets mapping table.
for ( cidr in Site::local_nets )
local_nets_table[cidr] = fmt("%s", cidr);
local_nets_table[cidr] = cidr;
}

@@ -1,4 +1,4 @@
@load base/frameworks/control
# If an instance is a controllee, it implicitly needs to listen for remote
# connections.
@load frameworks/communication/listen-clear

@@ -1,3 +1,4 @@
@load base/frameworks/control
@load base/frameworks/communication
module Control;

@@ -1,5 +1,9 @@
##! Finds connections with protocols on non-standard ports with DPD.
@load base/frameworks/notice
@load base/utils/site
@load base/utils/conn-ids
module ProtocolDetector;
export {

@@ -4,6 +4,8 @@
##! A caveat to logging packet data is that in some cases, the packet may
##! not be the packet that actually caused the protocol violation.
@load base/frameworks/dpd
module DPD;
export {

@@ -1,3 +1,5 @@
@load base/frameworks/metrics
@load base/utils/site
redef enum Metrics::ID += {
CONNS_ORIGINATED,
@@ -14,7 +16,7 @@ event bro_init()
event connection_established(c: connection)
{
Metrics::add_data(CONNS_ORIGINATED, [$host=c$id$orig_h]);
Metrics::add_data(CONNS_RESPONDED, [$host=c$id$resp_h]);
Metrics::add_data(CONNS_ORIGINATED, [$host=c$id$orig_h], 1);
Metrics::add_data(CONNS_RESPONDED, [$host=c$id$resp_h], 1);
}

@@ -1,4 +1,6 @@
@load base/frameworks/metrics
@load base/protocols/http
@load base/utils/site
redef enum Metrics::ID += {
HTTP_REQUESTS_BY_STATUS_CODE,
@@ -7,16 +9,21 @@ redef enum Metrics::ID += {
event bro_init()
{
Metrics::add_filter(HTTP_REQUESTS_BY_HOST_HEADER, [$break_interval=5mins]);
# Site::local_nets must be defined in order for this to actually do anything.
Metrics::add_filter(HTTP_REQUESTS_BY_STATUS_CODE, [$aggregation_table=Site::local_nets_table, $break_interval=5mins]);
# TODO: these are waiting on a fix with table vals + records before they will work.
#Metrics::add_filter(HTTP_REQUESTS_BY_HOST_HEADER,
# [$pred(index: Index) = { return Site::is_local_addr(index$host); },
# $aggregation_mask=24,
# $break_interval=5mins]);
#
## Site::local_nets must be defined in order for this to actually do anything.
#Metrics::add_filter(HTTP_REQUESTS_BY_STATUS_CODE, [$aggregation_table=Site::local_nets_table,
# $break_interval=5mins]);
}
event HTTP::log_http(rec: HTTP::Info)
{
if ( rec?$host )
Metrics::add_data(HTTP_REQUESTS_BY_HOST_HEADER, [$index=rec$host]);
Metrics::add_data(HTTP_REQUESTS_BY_HOST_HEADER, [$str=rec$host], 1);
if ( rec?$status_code )
Metrics::add_data(HTTP_REQUESTS_BY_STATUS_CODE, [$host=rec$id$orig_h, $index=fmt("%d", rec$status_code)]);
Metrics::add_data(HTTP_REQUESTS_BY_STATUS_CODE, [$host=rec$id$orig_h, $str=fmt("%d", rec$status_code)], 1);
}

@@ -1,4 +1,5 @@
@load base/frameworks/metrics
@load base/protocols/ssl
redef enum Metrics::ID += {
SSL_SERVERNAME,
@@ -8,8 +9,8 @@ event bro_init()
{
Metrics::add_filter(SSL_SERVERNAME,
[$name="no-google-ssl-servers",
$pred(entry: Metrics::Entry) = {
return (/google\.com$/ !in entry$index);
$pred(index: Metrics::Index) = {
return (/google\.com$/ !in index$str);
},
$break_interval=10secs
]);
@@ -18,5 +19,5 @@ event bro_init()
event SSL::log_ssl(rec: SSL::Info)
{
if ( rec?$server_name )
Metrics::add_data(SSL_SERVERNAME, [$index=rec$server_name]);
Metrics::add_data(SSL_SERVERNAME, [$str=rec$server_name], 1);
}

@@ -1,3 +1,5 @@
@load base/frameworks/notice
@load base/frameworks/software
module Software;

@@ -1,3 +1,5 @@
@load base/frameworks/notice
@load base/frameworks/software
module Software;

@@ -1,3 +1,2 @@
@load integration/barnyard2/types
@load integration/barnyard2/event
@load integration/barnyard2/base
@load ./types
@load ./main

@@ -1,3 +0,0 @@
## This is the event that Barnyard2 instances will send if they're
## configured with the bro_alert output plugin.
global barnyard_alert: event(id: Barnyard2::PacketID, alert: Barnyard2::AlertData, msg: string, data: string);

@@ -2,7 +2,7 @@
##! Barnyard2 and logs them. In the future it will do more correlation
##! and derive new notices from the alerts.
@load integration/barnyard2/types
@load ./types
module Barnyard2;

View file

@@ -22,4 +22,11 @@ export {
dst_ip: addr;
dst_p: port;
} &log;
## This is the event that Barnyard2 instances will send if they're
## configured with the bro_alert output plugin.
global barnyard_alert: event(id: Barnyard2::PacketID,
alert: Barnyard2::AlertData,
msg: string,
data: string);
}
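# A sketch of a hypothetical handler for the event declared above; the
# handler body is purely illustrative.
#
#   event Barnyard2::barnyard_alert(id: Barnyard2::PacketID,
#                                   alert: Barnyard2::AlertData,
#                                   msg: string, data: string)
#       {
#       print fmt("barnyard2 alert: %s", msg);
#       }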

@@ -3,6 +3,8 @@
##! output provides an easy way to determine a count of the IP addresses in
##! use on a network per day.
@load base/utils/directions-and-hosts
module KnownHosts;
export {

@@ -3,6 +3,8 @@
##! completed a TCP handshake with another host. If a protocol is detected
##! during the session, the protocol will also be logged.
@load base/utils/directions-and-hosts
module KnownServices;
redef enum Log::ID += { KNOWN_SERVICES };

@@ -1,4 +1,4 @@
@load base/frameworks/notice
@load base/frameworks/notice/main
@load port-name
module Scan;

@@ -1,3 +1,4 @@
@load base/protocols/dns/main
redef dns_skip_all_auth = F;
redef dns_skip_all_addl = F;

@@ -8,6 +8,9 @@
##! to be within a local zone. :bro:id:`local_zones` variable **must**
##! be set appropriately for this detection.
@load base/frameworks/notice/main
@load base/utils/site
module DNS;
export {

@@ -1,3 +1,5 @@
@load base/frameworks/notice/main
@load base/protocols/ftp/main
module FTP;

@@ -6,6 +6,8 @@
##! * Detect client software with password given for anonymous users
##! (e.g. cyberduck@example.net)
@load base/frameworks/software/main
module FTP;
export {

@@ -1,9 +1,14 @@
##! This script takes MD5 sums of files transferred over HTTP and checks them with
##! Team Cymru's Malware Hash Registry (http://www.team-cymru.org/Services/MHR/).
##! By default, not all file transfers will have MD5 sums calculated. Read the
##! documentation for the base/protocols/http/file-hash.bro script to see how to
##! documentation for the :doc:`base/protocols/http/file-hash.bro` script to see how to
##! configure which transfers will have hashes calculated.
@load base/frameworks/notice/main
@load base/protocols/http/main
@load base/protocols/http/utils
@load base/protocols/http/file-hash
export {
redef enum Notice::Type += {
## If the MD5 sum of a file transferred over HTTP

@@ -1,5 +1,9 @@
##! Intelligence based HTTP detections.
@load base/protocols/http/main
@load base/protocols/http/utils
@load base/frameworks/intel/main
module HTTP;
event log_http(rec: Info)

@@ -1,5 +1,9 @@
##! SQL injection detection in HTTP.
@load base/frameworks/notice/main
@load base/frameworks/metrics/main
@load base/protocols/http/main
module HTTP;
export {
@@ -35,13 +39,13 @@ export {
event bro_init()
{
Metrics::add_filter(SQL_ATTACKS, [$log=T,
$break_interval=1mins,
Metrics::add_filter(SQL_ATTACKS, [$log=F,
$break_interval=5mins,
$note=SQL_Injection_Attacker]);
Metrics::add_filter(SQL_ATTACKS_AGAINST, [$log=T,
$break_interval=1mins,
Metrics::add_filter(SQL_ATTACKS_AGAINST, [$log=F,
$break_interval=5mins,
$note=SQL_Injection_Attack,
$notice_thresholds=vector(10,100)]);
$notice_threshold=50]);
}
event http_request(c: connection, method: string, original_URI: string,
@@ -51,7 +55,7 @@ event http_request(c: connection, method: string, original_URI: string,
{
add c$http$tags[URI_SQLI];
Metrics::add_data(SQL_ATTACKS, [$host=c$id$orig_h]);
Metrics::add_data(SQL_ATTACKS_AGAINST, [$host=c$id$resp_h]);
Metrics::add_data(SQL_ATTACKS, [$host=c$id$orig_h], 1);
Metrics::add_data(SQL_ATTACKS_AGAINST, [$host=c$id$resp_h], 1);
}
}

@@ -1,3 +1,7 @@
@load base/frameworks/signatures/main
@load base/frameworks/software/main
@load base/protocols/http/main
@load base/protocols/http/utils
module HTTP;

@@ -1,5 +1,7 @@
##! Extract and include the header keys used for each request in the log.
@load base/protocols/http/main
module HTTP;
export {

@@ -1,5 +1,7 @@
##! Software identification and extraction for HTTP traffic.
@load base/frameworks/software/main
module HTTP;
export {

@@ -1,5 +1,8 @@
##! This script extracts and logs variables from cookies sent by clients
@load base/protocols/http/main
@load base/protocols/http/utils
module HTTP;
redef record Info += {

@@ -1,5 +1,8 @@
##! This script extracts and logs variables from the requested URI
@load base/protocols/http/main
@load base/protocols/http/utils
module HTTP;
redef record Info += {

@@ -1,3 +1,5 @@
@load base/frameworks/notice/main
@load base/protocols/smtp/main
module SMTP;

@@ -7,6 +7,9 @@
##! * Find some heuristic to determine if email was sent through
##! an MS Exchange webmail interface as opposed to a desktop client.
@load base/frameworks/software/main
@load base/protocols/smtp/main
module SMTP;
export {

@@ -1,4 +1,8 @@
@load base/frameworks/metrics
@load base/frameworks/notice
@load base/frameworks/intel
module SSH;
export {
@@ -12,6 +16,12 @@ export {
Login_By_Password_Guesser,
};
redef enum Metrics::ID += {
## This metric measures failed logins, with the hope of detecting
## bruteforcing hosts.
FAILED_LOGIN,
};
## The number of failed SSH connections before a host is designated as
## guessing passwords.
const password_guesses_limit = 30 &redef;
@@ -26,54 +36,40 @@ export {
## client subnets and the yield value represents server subnets.
const ignore_guessers: table[subnet] of subnet &redef;
## Keeps count of how many rejections a host has had.
global password_rejections: table[addr] of TrackCount
&write_expire=guessing_timeout
&synchronized;
## Keeps track of hosts identified as guessing passwords.
global password_guessers: set[addr] &read_expire=guessing_timeout+1hr &synchronized;
}
event bro_init()
{
Metrics::add_filter(FAILED_LOGIN, [$name="detect-bruteforcing", $log=F,
$note=Password_Guessing,
$notice_threshold=password_guesses_limit,
$notice_freq=1hr,
$break_interval=guessing_timeout]);
}
event SSH::heuristic_successful_login(c: connection)
{
local id = c$id;
# TODO: this should be migrated to the metrics framework.
if ( id$orig_h in password_rejections &&
password_rejections[id$orig_h]$n > password_guesses_limit &&
id$orig_h !in password_guessers )
{
add password_guessers[id$orig_h];
NOTICE([$note=Login_By_Password_Guesser,
$conn=c,
$n=password_rejections[id$orig_h]$n,
$msg=fmt("Successful SSH login by password guesser %s", id$orig_h),
$sub=fmt("%d failed logins", password_rejections[id$orig_h]$n)]);
}
# TODO: This is out for the moment pending some more additions to the
# metrics framework.
#if ( id$orig_h in password_guessers )
# {
# NOTICE([$note=Login_By_Password_Guesser,
# $conn=c,
# $msg=fmt("Successful SSH login by password guesser %s", id$orig_h)]);
# }
}
event SSH::heuristic_failed_login(c: connection)
{
local id = c$id;
# presumed failure
if ( id$orig_h !in password_rejections )
password_rejections[id$orig_h] = new_track_count();
# Track the number of rejections
# TODO: this should be migrated to the metrics framework.
# Add data to the FAILED_LOGIN metric unless this connection should
# be ignored.
if ( ! (id$orig_h in ignore_guessers &&
id$resp_h in ignore_guessers[id$orig_h]) )
++password_rejections[id$orig_h]$n;
if ( default_check_threshold(password_rejections[id$orig_h]) )
{
add password_guessers[id$orig_h];
NOTICE([$note=Password_Guessing,
$conn=c,
$msg=fmt("SSH password guessing by %s", id$orig_h),
$sub=fmt("%d apparently failed logins", password_rejections[id$orig_h]$n),
$n=password_rejections[id$orig_h]$n]);
}
Metrics::add_data(FAILED_LOGIN, [$host=id$orig_h], 1);
}

@@ -1,6 +1,9 @@
##! This implements all of the additional information and geodata detections
##! for SSH analysis.
@load base/frameworks/notice/main
@load base/protocols/ssh/main
module SSH;
export {

@@ -1,3 +1,4 @@
@load base/frameworks/notice/main
module SSH;
@@ -36,7 +37,7 @@ event SSH::heuristic_successful_login(c: connection)
}
}
# Check to see if this login went to an interesting hostname.
when ( local resp_hostname = lookup_addr(c$id$orig_h) )
when ( local resp_hostname = lookup_addr(c$id$resp_h) )
{
if ( interesting_hostnames in resp_hostname )
{

@@ -1,3 +1,4 @@
@load base/frameworks/software/main
module SSH;

@@ -1,3 +1,4 @@
@load base/utils/directions-and-hosts
module KnownCerts;

@@ -1,3 +1,5 @@
@load base/frameworks/notice/main
@load base/protocols/ssl/main
module SSL;

@@ -1,6 +1,9 @@
##! This strives to tune out high volume and less useful data
##! from the notice log.
@load base/frameworks/notice
@load base/frameworks/notice/weird
# Remove these notices from logging since they can be too noisy.
redef Notice::ignored_types += {
Weird::Content_Gap,

@@ -2,6 +2,8 @@
##! good to set in most cases or other things that could be done to achieve
##! better detection.
@load base/utils/site
event bro_init() &priority=-10
{
if ( |Site::local_nets| == 0 )

@@ -1,4 +1,4 @@
@load base/frameworks/software
@load protocols/conn/known-hosts
@load protocols/conn/known-services
@load protocols/ssl/known-certs

@@ -1,5 +1,7 @@
##! Local site policy loaded only by the manager in a cluster.
@load base/frameworks/notice
# If you are running a cluster you should define your Notice::policy here
# so that notice processing occurs on the manager.
redef Notice::policy += {

@@ -62,6 +62,7 @@ redef signature_files += "frameworks/signatures/detect-windows-shells.sig";
# Uncomment this redef if you want to extract SMTP MIME entities for
# some file types. The numbers given indicate how many bytes to extract for
# the various mime types.
@load base/protocols/smtp/entities-excerpt
redef SMTP::entity_excerpt_len += {
# ["text/plain"] = 1024,
# ["text/html"] = 1024,

@@ -15,11 +15,13 @@
# @load frameworks/control/controller.bro
@load frameworks/dpd/detect-protocols.bro
@load frameworks/dpd/packet-segment-logging.bro
@load frameworks/metrics/conn-example.bro
@load frameworks/metrics/http-example.bro
@load frameworks/metrics/ssl-example.bro
@load frameworks/software/version-changes.bro
@load frameworks/software/vulnerable.bro
@load integration/barnyard2/__load__.bro
@load integration/barnyard2/base.bro
@load integration/barnyard2/event.bro
@load integration/barnyard2/main.bro
@load integration/barnyard2/types.bro
@load misc/analysis-groups.bro
@load misc/loaded-scripts.bro

@@ -60,7 +60,7 @@ BroDoc::BroDoc(const std::string& rel, const std::string& abs)
if ( ! reST_file )
fprintf(stderr, "Failed to open %s\n", reST_filename.c_str());
#ifdef DEBUG
#ifdef DOCDEBUG
fprintf(stdout, "Documenting absolute source: %s\n", abs.c_str());
fprintf(stdout, "\trelative dir: %s\n", rel.c_str());
fprintf(stdout, "\tdoc title: %s\n", doc_title.c_str());

@@ -107,20 +107,28 @@ macro(BIF_TARGET bifInput)
add_custom_command(OUTPUT ${bifOutputs}
COMMAND bifcl
ARGS ${CMAKE_CURRENT_SOURCE_DIR}/${bifInput} || (rm -f ${bifOutputs} && exit 1)
# in order to be able to run bro from the build directory,
# the generated bro script needs to be inside a
# directory tree named the same way it will be
# referenced from an @load
COMMAND "${CMAKE_COMMAND}"
ARGS -E copy ${bifInput}.bro base/${bifInput}.bro
COMMAND "${CMAKE_COMMAND}"
ARGS -E remove -f ${bifInput}.bro
DEPENDS ${bifInput}
DEPENDS bifcl
COMMENT "[BIFCL] Processing ${bifInput}"
)
list(APPEND ALL_BIF_OUTPUTS ${bifOutputs})
list(APPEND INSTALL_BIF_OUTPUTS
${CMAKE_CURRENT_BINARY_DIR}/${bifInput}.bro)
${CMAKE_CURRENT_BINARY_DIR}/base/${bifInput}.bro)
endmacro(BIF_TARGET)
# returns a list of output files that bifcl will produce
# for given input file in ${outputFileVar}
macro(GET_BIF_OUTPUT_FILES inputFile outputFileVar)
set(${outputFileVar}
${inputFile}.bro
base/${inputFile}.bro
${inputFile}.func_def
${inputFile}.func_h
${inputFile}.func_init
@@ -424,7 +432,7 @@ set(brolibs
target_link_libraries(bro ${brolibs})
install(TARGETS bro DESTINATION bin)
install(FILES ${INSTALL_BIF_OUTPUTS} DESTINATION ${BRO_SCRIPT_INSTALL_PATH})
install(FILES ${INSTALL_BIF_OUTPUTS} DESTINATION ${BRO_SCRIPT_INSTALL_PATH}/base)
set(BRO_EXE bro
CACHE STRING "Bro executable binary" FORCE)

@@ -47,7 +47,7 @@ CompositeHash::CompositeHash(TypeList* composite_type)
else
{
size = ComputeKeySize();
size = ComputeKeySize(0, 1, true);
if ( size > 0 )
// Fixed size. Make sure what we get is fully aligned.
@@ -244,7 +244,7 @@ HashKey* CompositeHash::ComputeHash(const Val* v, int type_check) const
if ( ! k )
{
int sz = ComputeKeySize(v, type_check);
int sz = ComputeKeySize(v, type_check, false);
if ( sz == 0 )
return 0;
@@ -331,7 +331,8 @@ HashKey* CompositeHash::ComputeSingletonHash(const Val* v, int type_check) const
}
int CompositeHash::SingleTypeKeySize(BroType* bt, const Val* v,
int type_check, int sz, bool optional) const
int type_check, int sz, bool optional,
bool calc_static_size) const
{
InternalTypeTag t = bt->InternalType();
@@ -393,7 +394,8 @@ int CompositeHash::SingleTypeKeySize(BroType* bt, const Val* v,
sz = SingleTypeKeySize(rt->FieldType(i),
rv ? rv->Lookup(i) : 0,
type_check, sz, optional);
type_check, sz, optional,
calc_static_size);
if ( ! sz )
return 0;
}
@@ -408,7 +410,7 @@ int CompositeHash::SingleTypeKeySize(BroType* bt, const Val* v,
case TYPE_INTERNAL_STRING:
if ( ! v )
return optional ? sz : 0;
return (optional && ! calc_static_size) ? sz : 0;
// Factor in length field.
sz = SizeAlign(sz, sizeof(int));
@@ -422,7 +424,7 @@ int CompositeHash::SingleTypeKeySize(BroType* bt, const Val* v,
return sz;
}
int CompositeHash::ComputeKeySize(const Val* v, int type_check) const
int CompositeHash::ComputeKeySize(const Val* v, int type_check, bool calc_static_size) const
{
const type_list* tl = type->Types();
const val_list* vl = 0;
@@ -440,7 +442,7 @@ int CompositeHash::ComputeKeySize(const Val* v, int type_check) const
loop_over_list(*tl, i)
{
sz = SingleTypeKeySize((*tl)[i], v ? v->AsListVal()->Index(i) : 0,
type_check, sz, false);
type_check, sz, false, calc_static_size);
if ( ! sz )
return 0;
}
@@ -615,10 +617,6 @@ const char* CompositeHash::RecoverOneVal(const HashKey* k, const char* kp0,
pval = new AddrVal(addr_val);
break;
case TYPE_NET:
pval = new NetVal(addr_val);
break;
default:
reporter->InternalError("bad internal address in CompositeHash::RecoverOneVal()");
pval = 0;

Some files were not shown because too many files have changed in this diff.