Merge remote-tracking branch 'origin/master' into topic/robin/dynamic-plugins-2.3

(Never a good idea to name a branch after the version anticipated to include it ...)
Robin Sommer 2014-05-14 16:21:51 -07:00
commit bbd409d274
542 changed files with 18136 additions and 5621 deletions


@@ -1 +1,2 @@
 @load ./main.bro
+@load ./magic


@@ -0,0 +1,2 @@
+@load-sigs ./general
+@load-sigs ./libmagic


@@ -0,0 +1,11 @@
+# General purpose file magic signatures.
+
+signature file-plaintext {
+    file-magic /([[:print:][:space:]]{10})/
+    file-mime "text/plain", -20
+}
+
+signature file-tar {
+    file-magic /([[:print:]\x00]){100}(([[:digit:]\x00\x20]){8}){3}/
+    file-mime "application/x-tar", 150
+}
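For illustration of how the weights interact: when several file-magic signatures match the beginning of a file, the candidate with the highest file-mime weight wins, so the -20 on file-plaintext makes it a last-resort fallback that any specific type (like file-tar at 150) overrides. A hypothetical signature in the same style, not part of this commit, loaded via @load-sigs like the ones above:

signature file-pdf-example {
    # Matched against the buffered beginning of the file.
    file-magic /^%PDF-/
    # 100 beats the plaintext fallback's -20 whenever both match.
    file-mime "application/pdf", 100
}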

File diff suppressed because it is too large.


@@ -41,15 +41,15 @@ export {
         ## If this file was transferred over a network
         ## connection this should show the host or hosts that
         ## the data sourced from.
-        tx_hosts: set[addr] &log;
+        tx_hosts: set[addr] &default=addr_set() &log;
         ## If this file was transferred over a network
         ## connection this should show the host or hosts that
         ## the data traveled to.
-        rx_hosts: set[addr] &log;
+        rx_hosts: set[addr] &default=addr_set() &log;
         ## Connection UIDs over which the file was transferred.
-        conn_uids: set[string] &log;
+        conn_uids: set[string] &default=string_set() &log;
         ## An identification of the source of the file data. E.g. it
         ## may be a network protocol over which it was transferred, or a
@@ -63,12 +63,13 @@ export {
         depth: count &default=0 &log;
         ## A set of analysis types done during the file analysis.
-        analyzers: set[string] &log;
+        analyzers: set[string] &default=string_set() &log;
-        ## A mime type provided by libmagic against the *bof_buffer*
-        ## field of :bro:see:`fa_file`, or in the cases where no
-        ## buffering of the beginning of file occurs, an initial
-        ## guess of the mime type based on the first data seen.
+        ## A mime type provided by the strongest file magic signature
+        ## match against the *bof_buffer* field of :bro:see:`fa_file`,
+        ## or in the cases where no buffering of the beginning of file
+        ## occurs, an initial guess of the mime type based on the first
+        ## data seen.
         mime_type: string &log &optional;
         ## A filename for the file if one is available from the source
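The recurring change above swaps plain set fields for &default=addr_set() / &default=string_set(), so each new record starts with a fresh empty set rather than an unset field that callers must initialize first. A minimal sketch of the effect, using a made-up record type and UID:

type DefaultSetDemo: record {
    conn_uids: set[string] &default=string_set();
};

event bro_init()
    {
    local r: DefaultSetDemo;
    # No explicit initialization needed; &default supplies an empty set
    # when the record is created.
    add r$conn_uids["CHhAvVGS1DHFjwGM9"];
    print |r$conn_uids|;   # prints 1
    }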


@@ -5,11 +5,11 @@
 ##! ``config``: setting ``tsv`` to the string ``T`` turns the output into
 ##! "tab-separated-value" mode where only a single header row with the column
 ##! names is printed out as meta information, with no "# fields" prepended; no
-##! other meta data gets included in that mode.
-##!
+##! other meta data gets included in that mode.
+##!
 ##! Example filter using this::
 ##!
-##! local my_filter: Log::Filter = [$name = "my-filter", $writer = Log::WRITER_ASCII, $config = table(["tsv"] = "T")];
-##!
+##! local my_filter: Log::Filter = [$name = "my-filter", $writer = Log::WRITER_ASCII, $config = table(["tsv"] = "T")];
+##!
 
 module LogAscii;
@@ -17,27 +17,51 @@ module LogAscii;
 export {
     ## If true, output everything to stdout rather than
    ## into files. This is primarily for debugging purposes.
+    ##
+    ## This option is also available as a per-filter ``$config`` option.
     const output_to_stdout = F &redef;
 
     ## If true, the default will be to write logs in a JSON format.
+    ##
+    ## This option is also available as a per-filter ``$config`` option.
     const use_json = F &redef;
 
+    ## Format of timestamps when writing out JSON. By default, the JSON formatter will
+    ## use double values for timestamps which represent the number of seconds from the
+    ## UNIX epoch.
+    const json_timestamps: JSON::TimestampFormat = JSON::TS_EPOCH &redef;
+
+    ## If true, include lines with log meta information such as column names
+    ## with types, the values of ASCII logging options that are in use, and
+    ## the time when the file was opened and closed (the latter at the end).
+    ##
+    ## If writing in JSON format, this is implicitly disabled.
+    const include_meta = T &redef;
+
     ## Prefix for lines with meta information.
+    ##
+    ## This option is also available as a per-filter ``$config`` option.
     const meta_prefix = "#" &redef;
 
     ## Separator between fields.
+    ##
+    ## This option is also available as a per-filter ``$config`` option.
     const separator = Log::separator &redef;
 
     ## Separator between set elements.
+    ##
+    ## This option is also available as a per-filter ``$config`` option.
     const set_separator = Log::set_separator &redef;
 
     ## String to use for empty fields. This should be different from
-    ## *unset_field* to make the output unambiguous.
+    ## *unset_field* to make the output unambiguous.
+    ##
+    ## This option is also available as a per-filter ``$config`` option.
     const empty_field = Log::empty_field &redef;
 
     ## String to use for an unset &optional field.
+    ##
+    ## This option is also available as a per-filter ``$config`` option.
     const unset_field = Log::unset_field &redef;
 }
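Taken together, these options can be set globally via redef or per filter via $config. A minimal usage sketch (the filter name and log stream are made up; JSON::TS_ISO8601 is one of the shipped TimestampFormat values alongside TS_EPOCH and TS_MILLIS):

# Write all ASCII logs as JSON with ISO 8601 timestamps.
redef LogAscii::use_json = T;
redef LogAscii::json_timestamps = JSON::TS_ISO8601;

event bro_init()
    {
    # Per-filter override via $config, as the comments above describe.
    local f: Log::Filter = [$name="conn-tsv", $writer=Log::WRITER_ASCII,
                            $config=table(["tsv"] = "T")];
    Log::add_filter(Conn::LOG, f);
    }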


@@ -206,6 +206,38 @@ export {
     ## The maximum amount of time a plugin can delay email from being sent.
     const max_email_delay = 15secs &redef;
 
+    ## Contains a portion of :bro:see:`fa_file` that's also contained in
+    ## :bro:see:`Notice::Info`.
+    type FileInfo: record {
+        fuid: string;            ##< File UID.
+        desc: string;            ##< File description from e.g.
+                                 ##< :bro:see:`Files::describe`.
+        mime: string &optional;  ##< Strongest mime type match for file.
+        cid: conn_id &optional;  ##< Connection tuple over which file is sent.
+        cuid: string &optional;  ##< Connection UID over which file is sent.
+    };
+
+    ## Creates a record containing a subset of a full :bro:see:`fa_file` record.
+    ##
+    ## f: record containing metadata about a file.
+    ##
+    ## Returns: record containing a subset of fields copied from *f*.
+    global create_file_info: function(f: fa_file): Notice::FileInfo;
+
+    ## Populates file-related fields in a notice info record.
+    ##
+    ## f: record containing metadata about a file.
+    ##
+    ## n: a notice record that needs file-related fields populated.
+    global populate_file_info: function(f: fa_file, n: Notice::Info);
+
+    ## Populates file-related fields in a notice info record.
+    ##
+    ## fi: record containing metadata about a file.
+    ##
+    ## n: a notice record that needs file-related fields populated.
+    global populate_file_info2: function(fi: Notice::FileInfo, n: Notice::Info);
+
     ## A log postprocessing function that implements emailing the contents
     ## of a log upon rotation to any configured :bro:id:`Notice::mail_dest`.
     ## The rotated log is removed upon being sent.
@@ -493,6 +525,42 @@ function execute_with_notice(cmd: string, n: Notice::Info)
     #system_env(cmd, tags);
     }
 
+function create_file_info(f: fa_file): Notice::FileInfo
+    {
+    local fi: Notice::FileInfo = Notice::FileInfo($fuid = f$id,
+                                                  $desc = Files::describe(f));
+
+    if ( f?$mime_type )
+        fi$mime = f$mime_type;
+
+    if ( f?$conns && |f$conns| == 1 )
+        for ( id in f$conns )
+            {
+            fi$cid = id;
+            fi$cuid = f$conns[id]$uid;
+            }
+
+    return fi;
+    }
+
+function populate_file_info(f: fa_file, n: Notice::Info)
+    {
+    populate_file_info2(create_file_info(f), n);
+    }
+
+function populate_file_info2(fi: Notice::FileInfo, n: Notice::Info)
+    {
+    if ( ! n?$fuid )
+        n$fuid = fi$fuid;
+
+    if ( ! n?$file_mime_type && fi?$mime )
+        n$file_mime_type = fi$mime;
+
+    n$file_desc = fi$desc;
+    n$id = fi$cid;
+    n$uid = fi$cuid;
+    }
+
 # This is run synchronously as a function before all of the other
 # notice related functions and events. It also modifies the
 # :bro:type:`Notice::Info` record in place.
@@ -503,21 +571,7 @@ function apply_policy(n: Notice::Info)
         n$ts = network_time();
 
     if ( n?$f )
-        {
-        if ( ! n?$fuid )
-            n$fuid = n$f$id;
-
-        if ( ! n?$file_mime_type && n$f?$mime_type )
-            n$file_mime_type = n$f$mime_type;
-
-        n$file_desc = Files::describe(n$f);
-
-        if ( n$f?$conns && |n$f$conns| == 1 )
-            {
-            for ( id in n$f$conns )
-                n$conn = n$f$conns[id];
-            }
-        }
+        populate_file_info(n$f, n);
 
     if ( n?$conn )
         {
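The new helpers factor the old inline logic out of apply_policy so that other scripts can attach file context to their own notices. A sketch of calling them from user code; the module and notice type here are hypothetical:

module FileNoticeDemo;

export {
    redef enum Notice::Type += { File_Seen };
}

event file_new(f: fa_file)
    {
    local n: Notice::Info = [$note=File_Seen, $msg="observed a file"];
    # Copies fuid, mime type, description, and (for single-connection
    # files) the connection tuple and UID into the notice record.
    Notice::populate_file_info(f, n);
    NOTICE(n);
    }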


@@ -185,6 +185,7 @@ export {
         ["RPC_underflow"] = ACTION_LOG,
         ["RST_storm"] = ACTION_LOG,
         ["RST_with_data"] = ACTION_LOG,
+        ["SSL_many_server_names"] = ACTION_LOG,
         ["simultaneous_open"] = ACTION_LOG_PER_CONN,
         ["spontaneous_FIN"] = ACTION_IGNORE,
         ["spontaneous_RST"] = ACTION_IGNORE,


@@ -70,6 +70,9 @@ export {
         ## The network time at which a signature matching type of event
         ## to be logged has occurred.
         ts: time &log;
+        ## A unique identifier of the connection which triggered the
+        ## signature match event.
+        uid: string &log &optional;
         ## The host which triggered the signature match event.
         src_addr: addr &log &optional;
         ## The host port on which the signature-matching activity
@@ -167,7 +170,7 @@ event signature_match(state: signature_state, msg: string, data: string)
     # Trim the matched data down to something reasonable
     if ( |data| > 140 )
         data = fmt("%s...", sub_bytes(data, 0, 140));
 
     local src_addr: addr;
     local src_port: port;
     local dst_addr: addr;
@@ -192,6 +195,7 @@ event signature_match(state: signature_state, msg: string, data: string)
         {
         local info: Info = [$ts=network_time(),
                             $note=Sensitive_Signature,
+                            $uid=state$conn$uid,
                             $src_addr=src_addr,
                             $src_port=src_port,
                             $dst_addr=dst_addr,
@@ -212,11 +216,11 @@ event signature_match(state: signature_state, msg: string, data: string)
         if ( ++count_per_resp[dst,sig_id] in count_thresholds )
             {
             NOTICE([$note=Count_Signature, $conn=state$conn,
-                $msg=msg,
-                $n=count_per_resp[dst,sig_id],
-                $sub=fmt("%d matches of signature %s on host %s",
-                    count_per_resp[dst,sig_id],
-                    sig_id, dst)]);
+                    $msg=msg,
+                    $n=count_per_resp[dst,sig_id],
+                    $sub=fmt("%d matches of signature %s on host %s",
+                             count_per_resp[dst,sig_id],
+                             sig_id, dst)]);
             }
         }
@@ -290,16 +294,16 @@ event signature_match(state: signature_state, msg: string, data: string)
                           orig, vcount, resp);
 
             Log::write(Signatures::LOG,
-                [$ts=network_time(),
-                 $note=Multiple_Signatures,
-                 $src_addr=orig,
-                 $dst_addr=resp, $sig_id=sig_id, $sig_count=vcount,
-                 $event_msg=fmt("%s different signatures triggered", vcount),
-                 $sub_msg=vert_scan_msg]);
+                       [$ts=network_time(),
+                        $note=Multiple_Signatures,
+                        $src_addr=orig,
+                        $dst_addr=resp, $sig_id=sig_id, $sig_count=vcount,
+                        $event_msg=fmt("%s different signatures triggered", vcount),
+                        $sub_msg=vert_scan_msg]);
 
             NOTICE([$note=Multiple_Signatures, $src=orig, $dst=resp,
-                $msg=fmt("%s different signatures triggered", vcount),
-                $n=vcount, $sub=vert_scan_msg]);
+                    $msg=fmt("%s different signatures triggered", vcount),
+                    $n=vcount, $sub=vert_scan_msg]);
 
             last_vthresh[orig] = vcount;
             }


@@ -287,6 +287,13 @@ function parse_mozilla(unparsed_version: string): Description
         if ( 2 in parts )
             v = parse(parts[2])$version;
         }
+    else if ( / Java\/[0-9]\./ in unparsed_version )
+        {
+        software_name = "Java";
+        parts = split_all(unparsed_version, /Java\/[0-9\._]*/);
+        if ( 2 in parts )
+            v = parse(parts[2])$version;
+        }
 
     return [$version=v, $unparsed_version=unparsed_version, $name=software_name];
     }
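The added branch recognizes Java user agents embedded in Mozilla-style strings, attributing the version to Java rather than to the browser. A quick sanity check, assuming Software::parse_mozilla is exported as in the stock framework (the user-agent string is made up):

event bro_init()
    {
    local d = Software::parse_mozilla("Mozilla/4.0 (Windows NT 6.1) Java/1.7.0_55");
    print d$name;               # Java
    print d$unparsed_version;   # the full input string
    }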


@@ -62,7 +62,7 @@ export {
 # Add events to the cluster framework to make this work.
 redef Cluster::manager2worker_events += /SumStats::cluster_(ss_request|get_result|threshold_crossed)/;
 redef Cluster::manager2worker_events += /SumStats::(get_a_key)/;
-redef Cluster::worker2manager_events += /SumStats::cluster_(ss_response|send_result|key_intermediate_response)/;
+redef Cluster::worker2manager_events += /SumStats::cluster_(send_result|key_intermediate_response)/;
 redef Cluster::worker2manager_events += /SumStats::(send_a_key|send_no_key)/;
 
 @if ( Cluster::local_node_type() != Cluster::MANAGER )
@@ -74,7 +74,7 @@ global recent_global_view_keys: table[string, Key] of count &create_expire=1min
 
 # Result tables indexed on a uid that are currently being sent to the
 # manager.
-global sending_results: table[string] of ResultTable = table() &create_expire=1min;
+global sending_results: table[string] of ResultTable = table() &read_expire=1min;
 
 # This is done on all non-manager node types in the event that a sumstat is
 # being collected somewhere other than a worker.
@@ -203,7 +203,7 @@ event SumStats::cluster_threshold_crossed(ss_name: string, key: SumStats::Key, thold_index: count)
 
 # This variable is maintained by manager nodes as they collect and aggregate
 # results.
 # Index on a uid.
-global stats_keys: table[string] of set[Key] &create_expire=1min
+global stats_keys: table[string] of set[Key] &read_expire=1min
     &expire_func=function(s: table[string] of set[Key], idx: string): interval
     {
     Reporter::warning(fmt("SumStat key request for the %s SumStat uid took longer than 1 minute and was automatically cancelled.", idx));
@@ -216,16 +216,16 @@ global stats_keys: table[string] of set[Key] &create_expire=1min
 # result is written out and deleted from here.
 # Indexed on a uid.
 # TODO: add an &expire_func in case not all results are received.
-global done_with: table[string] of count &create_expire=1min &default=0;
+global done_with: table[string] of count &read_expire=1min &default=0;
 
 # This variable is maintained by managers to track intermediate responses as
 # they are getting a global view for a certain key.
 # Indexed on a uid.
-global key_requests: table[string] of Result &create_expire=1min;
+global key_requests: table[string] of Result &read_expire=1min;
 
 # Store uids for dynamic requests here to avoid cleanup on the uid.
 # (This needs to be done differently!)
-global dynamic_requests: set[string] &create_expire=1min;
+global dynamic_requests: set[string] &read_expire=1min;
 
 # This variable is maintained by managers to prevent overwhelming communication due
 # to too many intermediate updates. Each sumstat is tracked separately so that
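The recurring change in this file is &create_expire becoming &read_expire on the manager's bookkeeping tables: with &create_expire an entry is evicted a fixed interval after insertion even if it is still in active use, while &read_expire restarts the timer on every read, so long-running aggregations are no longer torn down mid-request. Illustrative declarations (table names made up):

global by_insert: table[string] of count &create_expire=1min;  # evicted 1 min after insert, reads don't matter
global by_access: table[string] of count &read_expire=1min;    # eviction timer restarts on each read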


@@ -2,23 +2,59 @@
 module SumStats;
 
+event SumStats::process_epoch_result(ss: SumStat, now: time, data: ResultTable)
+    {
+    # TODO: is this the right processing group size?
+    local i = 50;
+
+    for ( key in data )
+        {
+        ss$epoch_result(now, key, data[key]);
+        delete data[key];
+
+        if ( |data| == 0 )
+            {
+            if ( ss?$epoch_finished )
+                ss$epoch_finished(now);
+
+            # Now that no data is left we can finish.
+            return;
+            }
+
+        i = i-1;
+        if ( i == 0 )
+            {
+            # TODO: is this the right interval?
+            schedule 0.01 secs { process_epoch_result(ss, now, data) };
+            break;
+            }
+        }
+    }
+
 event SumStats::finish_epoch(ss: SumStat)
     {
     if ( ss$name in result_store )
         {
-        local now = network_time();
-
         if ( ss?$epoch_result )
             {
             local data = result_store[ss$name];
-            # TODO: don't block here.
-            for ( key in data )
-                ss$epoch_result(now, key, data[key]);
+
+            local now = network_time();
+            if ( bro_is_terminating() )
+                {
+                for ( key in data )
+                    ss$epoch_result(now, key, data[key]);
+
+                if ( ss?$epoch_finished )
+                    ss$epoch_finished(now);
+                }
+            else
+                {
+                event SumStats::process_epoch_result(ss, now, data);
+                }
             }
 
-        if ( ss?$epoch_finished )
-            ss$epoch_finished(now);
-
+        # We can reset here because we know that the reference
+        # to the data will be maintained by the process_epoch_result
+        # event.
         reset(ss);
         }
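process_epoch_result replaces the old blocking for-loop in finish_epoch: it handles at most 50 keys per invocation, then reschedules itself 0.01 seconds out so other events can run, with the shrinking ResultTable (delete data[key]) serving as the loop state; when Bro is terminating the work is done inline instead, since scheduled events would no longer fire. The same yield-to-the-event-loop idiom in isolation (all names here are hypothetical):

global drain: event(data: set[string]);

event drain(data: set[string])
    {
    local i = 50;   # chunk size, mirroring the code above
    for ( s in data )
        {
        # ... process s here ...
        delete data[s];
        if ( |data| == 0 )
            return;
        i = i - 1;
        if ( i == 0 )
            {
            # Yield; the remaining members are handled shortly.
            schedule 0.01 secs { drain(data) };
            break;
            }
        }
    }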