Merge remote-tracking branch 'origin/master' into topic/bif_cleanup

Conflicts:
	src/bro.bif
Matthias Vallentin 2012-01-18 14:56:53 -08:00
commit 6c255d13ff
247 changed files with 6038 additions and 3503 deletions

View file

@@ -44,7 +44,7 @@ event bro_init() &priority=9
{
if ( n$node_type == WORKER && n$proxy == node )
Communication::nodes[i] =
[$host=n$ip, $connect=F, $class=i, $events=worker2proxy_events];
[$host=n$ip, $connect=F, $class=i, $sync=T, $auth=T, $events=worker2proxy_events];
# accepts connections from the previous one.
# (This is not ideal for setups with many proxies)

View file

@@ -1,43 +1,30 @@
##! This is a utility script that sends the current values of all &redef'able
##! consts to a remote Bro then sends the :bro:id:`configuration_update` event
##! and terminates processing.
##!
##! Intended to be used from the command line like this when starting a controller::
##!
##! bro <scripts> frameworks/control/controller Control::host=<host_addr> Control::host_port=<host_port> Control::cmd=<command> [Control::arg=<arg>]
##!
##! A controllee only needs to load the controllee script in addition
##! to the specific analysis scripts desired. It may also need a node
##! configured as a controller node in the communications nodes configuration::
##!
##! bro <scripts> frameworks/control/controllee
##!
##! To use the framework as a controllee, it only needs to be loaded and
##! the controlled node needs to accept all events in the "Control::" namespace
##! from the host where the control actions will be performed, along with
##! using the "control" class.
##! The control framework provides the foundation for providing "commands"
##! that can be taken remotely at runtime to modify a running Bro instance
##! or collect information from the running instance.
module Control;
export {
## This is the address of the host that will be controlled.
## The address of the host that will be controlled.
const host = 0.0.0.0 &redef;
## This is the port of the host that will be controlled.
## The port of the host that will be controlled.
const host_port = 0/tcp &redef;
## This is the command that is being done. It's typically set on the
## command line and influences whether this instance starts up as a
## controller or controllee.
## The command that is being done. It's typically set on the
## command line.
const cmd = "" &redef;
## This can be used by commands that take an argument.
const arg = "" &redef;
## Events that need to be handled by controllers.
const controller_events = /Control::.*_request/ &redef;
## Events that need to be handled by controllees.
const controllee_events = /Control::.*_response/ &redef;
## These are the commands that can be given on the command line for
## The commands that can currently be given on the command line for
## remote control.
const commands: set[string] = {
"id_value",
@@ -45,15 +32,15 @@ export {
"net_stats",
"configuration_update",
"shutdown",
};
} &redef;
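# A hypothetical invocation sketch (the address and port are placeholders):
# instruct a controllee listening on 10.0.0.1:47757 to shut down.
#
#     bro frameworks/control/controller Control::host=10.0.0.1 \
#         Control::host_port=47757/tcp Control::cmd=shutdown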
## Variable IDs that are to be ignored by the update process.
const ignore_ids: set[string] = {
};
const ignore_ids: set[string] = { };
## Event for requesting the value of an ID (a variable).
global id_value_request: event(id: string);
## Event for returning the value of an ID after an :bro:id:`id_request` event.
## Event for returning the value of an ID after an
## :bro:id:`Control::id_value_request` event.
global id_value_response: event(id: string, val: string);
## Requests the current communication status.
@@ -68,7 +55,8 @@ export {
## Inform the remote Bro instance that its configuration may have been updated.
global configuration_update_request: event();
## This event is a wrapper and alias for the :bro:id:`configuration_update_request` event.
## This event is a wrapper and alias for the
## :bro:id:`Control::configuration_update_request` event.
## This event is also a primary hooking point for the control framework.
global configuration_update: event();
## Message in response to a configuration update request.

View file

@@ -80,15 +80,15 @@ signature irc_server_reply {
tcp-state responder
}
signature irc_sig3 {
signature irc_server_to_server1 {
ip-proto == tcp
payload /(.*\x0a)*(\x20)*[Ss][Ee][Rr][Vv][Ee][Rr](\x20)+.+\x0a/
payload /(|.*[\r\n]) *[Ss][Ee][Rr][Vv][Ee][Rr] +[^ ]+ +[0-9]+ +:.+[\r\n]/
}
signature irc_sig4 {
signature irc_server_to_server2 {
ip-proto == tcp
payload /(.*\x0a)*(\x20)*[Ss][Ee][Rr][Vv][Ee][Rr](\x20)+.+\x0a/
requires-reverse-signature irc_sig3
payload /(|.*[\r\n]) *[Ss][Ee][Rr][Vv][Ee][Rr] +[^ ]+ +[0-9]+ +:.+[\r\n]/
requires-reverse-signature irc_server_to_server1
enable "irc"
}

View file

@@ -11,7 +11,7 @@
# user_name
# file_name
# file_md5
# x509_cert - DER encoded, not PEM (ascii armored)
# x509_md5
# Example tags:
# infrastructure
@@ -25,6 +25,7 @@
module Intel;
export {
## The intel logging stream identifier.
redef enum Log::ID += { LOG };
redef enum Notice::Type += {
@@ -33,72 +34,117 @@ export {
Detection,
};
## Record type used for logging information from the intelligence framework.
## Primarily for problems or oddities with inserting and querying data.
## This is important since the content of the intelligence framework can
## change quite dramatically during runtime and problems may be introduced
## into the data.
type Info: record {
## The current network time.
ts: time &log;
## Represents the severity of the message.
## This value should be one of: "info", "warn", "error"
level: string &log;
## The message.
message: string &log;
};
## Record to represent metadata associated with a single piece of
## intelligence.
type MetaData: record {
## A description for the data.
desc: string &optional;
## A URL where more information may be found about the intelligence.
url: string &optional;
## The time at which the data was first declared to be intelligence.
first_seen: time &optional;
## When this data was most recently inserted into the framework.
latest_seen: time &optional;
## Arbitrary text tags for the data.
tags: set[string];
};
## Record to represent a singular piece of intelligence.
type Item: record {
## If the data is an IP address, this holds the address.
ip: addr &optional;
## If the data is textual, this holds the text.
str: string &optional;
## If the data is numeric, this holds the number.
num: int &optional;
## The subtype of the data for when either the $str or $num fields are
## given. If one of those fields is given, this field must be present.
subtype: string &optional;
## The next five fields are temporary until a better model for
## attaching metadata to an intelligence item is created.
desc: string &optional;
url: string &optional;
first_seen: time &optional;
latest_seen: time &optional;
tags: set[string];
## These single string tags are throw away until pybroccoli supports sets
## These single string tags are throwaway until pybroccoli supports sets.
tag1: string &optional;
tag2: string &optional;
tag3: string &optional;
};
## Record model used for constructing queries against the intelligence
## framework.
type QueryItem: record {
ip: addr &optional;
str: string &optional;
num: int &optional;
subtype: string &optional;
## If an IP address is being queried for, this field should be given.
ip: addr &optional;
## If a string is being queried for, this field should be given.
str: string &optional;
## If numeric data is being queried for, this field should be given.
num: int &optional;
## If either a string or number is being queried for, this field should
## indicate the subtype of the data.
subtype: string &optional;
or_tags: set[string] &optional;
and_tags: set[string] &optional;
## A set of tags where if a single metadata record attached to an item
## has any one of the tags defined in this field, it will match.
or_tags: set[string] &optional;
## A set of tags where a single metadata record attached to an item
## must have all of the tags defined in this field.
and_tags: set[string] &optional;
## The predicate can be given when searching for a match. It will
## be tested against every :bro:type:`MetaData` item associated with
## the data being matched on. If it returns T a single time, the
## matcher will consider that the item has matched.
pred: function(meta: Intel::MetaData): bool &optional;
## be tested against every :bro:type:`Intel::MetaData` item associated
## with the data being matched on. If it returns T a single time, the
## matcher will consider that the item has matched. This field can
## be used for constructing arbitrarily complex queries that may not
## be possible with the $or_tags or $and_tags fields.
pred: function(meta: Intel::MetaData): bool &optional;
};
## Function to insert data into the intelligence framework.
##
## item: The data item.
##
## Returns: T if the data was successfully inserted into the framework,
## otherwise it returns F.
global insert: function(item: Item): bool;
## A wrapper for the :bro:id:`Intel::insert` function. This is primarily
## used as the external API for inserting data into the intelligence
## framework using Broccoli.
global insert_event: event(item: Item);
## Function for matching data within the intelligence framework.
global matcher: function(item: QueryItem): bool;
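# A hypothetical usage sketch (the address, description, and tag are
# placeholders): insert an item, then query it back with a predicate that
# requires a specific tag on the attached metadata.
#
#     Intel::insert([$ip=10.0.0.1, $desc="known scanner",
#                    $tags=set("infrastructure")]);
#     local q: Intel::QueryItem = [$ip=10.0.0.1,
#         $pred(meta: Intel::MetaData) = { return "infrastructure" in meta$tags; }];
#     if ( Intel::matcher(q) )
#         print "intel match";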
type MetaDataStore: table[count] of MetaData;
type DataStore: record {
ip_data: table[addr] of MetaDataStore;
## The first string is the actual value and the second string is the subtype.
string_data: table[string, string] of MetaDataStore;
int_data: table[int, string] of MetaDataStore;
};
global data_store: DataStore;
}
type MetaDataStore: table[count] of MetaData;
type DataStore: record {
ip_data: table[addr] of MetaDataStore;
# The first string is the actual value and the second string is the subtype.
string_data: table[string, string] of MetaDataStore;
int_data: table[int, string] of MetaDataStore;
};
global data_store: DataStore;
event bro_init()
{
Log::create_stream(Intel::LOG, [$columns=Info]);

View file

@@ -1 +1,2 @@
@load ./scp
@load ./sftp

View file

@@ -47,6 +47,10 @@ export {
## copy of the rotated-log to each destination in the set. This
## table can be modified at run-time.
global scp_destinations: table[Writer, string] of set[SCPDestination];
## Default naming format for timestamps embedded into log filenames
## that use the SCP rotator.
const scp_rotation_date_format = "%Y-%m-%d-%H-%M-%S" &redef;
}
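# A hypothetical sketch (the user, host, and path are placeholders): send
# each rotated "conn" log to a single SCP destination.
#
#     Log::scp_destinations[Log::WRITER_ASCII, "conn"] =
#         set([$user="bro", $host="backup.example.com", $path="/var/log/bro"]);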
function scp_postprocessor(info: Log::RotationInfo): bool
@@ -56,7 +60,11 @@ function scp_postprocessor(info: Log::RotationInfo): bool
local command = "";
for ( d in scp_destinations[info$writer, info$path] )
command += fmt("scp %s %s@%s:%s;", info$fname, d$user, d$host, d$path);
{
local dst = fmt("%s/%s.%s.log", d$path, info$path,
strftime(Log::scp_rotation_date_format, info$open));
command += fmt("scp %s %s@%s:%s;", info$fname, d$user, d$host, dst);
}
command += fmt("/bin/rm %s", info$fname);
system(command);

View file

@@ -0,0 +1,73 @@
##! This script defines a postprocessing function that can be applied
##! to a logging filter in order to automatically SFTP
##! a log stream (or a subset of it) to a remote host at configurable
##! rotation time intervals. Generally, to use this functionality
##! you must handle the :bro:id:`bro_init` event and do the following
##! in your handler (a sketch follows these steps):
##!
##! 1) Create a new :bro:type:`Log::Filter` record that defines a name/path,
##! rotation interval, and set the ``postprocessor`` to
##! :bro:id:`Log::sftp_postprocessor`.
##! 2) Add the filter to a logging stream using :bro:id:`Log::add_filter`.
##! 3) Add a table entry to :bro:id:`Log::sftp_destinations` for the filter's
##! writer/path pair which defines a set of :bro:type:`Log::SFTPDestination`
##! records.
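##!
##! For example, a minimal sketch of such a handler (the filter name, path,
##! interval, and destination values are all placeholders)::
##!
##!     event bro_init()
##!         {
##!         local f: Log::Filter = [$name="conn-sftp", $path="conn-sftp",
##!                                 $interv=1hr,
##!                                 $postprocessor=Log::sftp_postprocessor];
##!         Log::add_filter(Conn::LOG, f);
##!         Log::sftp_destinations[Log::WRITER_ASCII, "conn-sftp"] =
##!             set([$user="bro", $host="backup.example.com", $path="/logs"]);
##!         }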
module Log;
export {
## Securely transfers the rotated-log to all the remote hosts
## defined in :bro:id:`Log::sftp_destinations` and then deletes
## the local copy of the rotated-log. It's not active when
## reading from trace files.
##
## info: A record holding meta-information about the log file to be
## postprocessed.
##
## Returns: True if sftp system command was initiated or
## if no destination was configured for the log as described
## by *info*.
global sftp_postprocessor: function(info: Log::RotationInfo): bool;
## A container that describes the remote destination for the SFTP command,
## comprised of the username, host, and path at which to upload the file.
type SFTPDestination: record {
## The remote user to log in as. A trust mechanism should be
## pre-established.
user: string;
## The remote host to which to transfer logs.
host: string;
## The path/directory on the remote host to send logs.
path: string;
};
## A table indexed by a particular log writer and filter path, that yields
## a set of remote destinations. The :bro:id:`Log::sftp_postprocessor`
## function queries this table upon log rotation and performs a secure
## transfer of the rotated-log to each destination in the set. This
## table can be modified at run-time.
global sftp_destinations: table[Writer, string] of set[SFTPDestination];
## Default naming format for timestamps embedded into log filenames
## that use the SFTP rotator.
const sftp_rotation_date_format = "%Y-%m-%d-%H-%M-%S" &redef;
}
function sftp_postprocessor(info: Log::RotationInfo): bool
{
if ( reading_traces() || [info$writer, info$path] !in sftp_destinations )
return T;
local command = "";
for ( d in sftp_destinations[info$writer, info$path] )
{
local dst = fmt("%s/%s.%s.log", d$path, info$path,
strftime(Log::sftp_rotation_date_format, info$open));
command += fmt("echo put %s %s | sftp -b - %s@%s;", info$fname, dst,
d$user, d$host);
}
command += fmt("/bin/rm %s", info$fname);
system(command);
return T;
}

View file

@@ -21,8 +21,9 @@ export {
## Separator between set elements.
const set_separator = "," &redef;
## String to use for empty fields.
const empty_field = "-" &redef;
## String to use for empty fields. This should be different from
## *unset_field* to make the output unambiguous.
const empty_field = "(empty)" &redef;
## String to use for an unset &optional field.
const unset_field = "-" &redef;

View file

@@ -13,11 +13,11 @@
module Metrics;
export {
## This value allows a user to decide how large of result groups the
## workers should transmit values.
## Allows a user to decide how large the result groups should be that
## workers transmit for cluster metric aggregation.
const cluster_send_in_groups_of = 50 &redef;
## This is the percent of the full threshold value that needs to be met
## The percent of the full threshold value that needs to be met
## on a single worker for that worker to send the value to its manager in
## order for it to request a global view for that value. There is no
## requirement that the manager requests a global view for the index
@@ -25,11 +25,11 @@ export {
## recently.
const cluster_request_global_view_percent = 0.1 &redef;
## This event is sent by the manager in a cluster to initiate the
## Event sent by the manager in a cluster to initiate the
## collection of metrics values for a filter.
global cluster_filter_request: event(uid: string, id: ID, filter_name: string);
## This event is sent by nodes that are collecting metrics after receiving
## Event sent by nodes that are collecting metrics after receiving
## a request for the metric filter from the manager.
global cluster_filter_response: event(uid: string, id: ID, filter_name: string, data: MetricTable, done: bool);
@@ -40,12 +40,12 @@ export {
global cluster_index_request: event(uid: string, id: ID, filter_name: string, index: Index);
## This event is sent by nodes in response to a
## :bro:id:`cluster_index_request` event.
## :bro:id:`Metrics::cluster_index_request` event.
global cluster_index_response: event(uid: string, id: ID, filter_name: string, index: Index, val: count);
## This is sent by workers to indicate that they crossed the percent of the
## current threshold by the percentage defined globally in
## :bro:id:`cluster_request_global_view_percent`
## :bro:id:`Metrics::cluster_request_global_view_percent`
global cluster_index_intermediate_response: event(id: Metrics::ID, filter_name: string, index: Metrics::Index, val: count);
## This event is scheduled internally on workers to send result chunks.

View file

@@ -1,13 +1,16 @@
##! This is the implementation of the metrics framework.
##! The metrics framework provides a way to count and measure data.
@load base/frameworks/notice
module Metrics;
export {
## The metrics logging stream identifier.
redef enum Log::ID += { LOG };
## Identifiers for metrics to collect.
type ID: enum {
## Blank placeholder value.
NOTHING,
};
@@ -15,10 +18,13 @@ export {
## current value to the logging stream.
const default_break_interval = 15mins &redef;
## This is the interval for how often notices will happen after they have
## already fired.
## This is the interval for how often threshold based notices will happen
## after they have already fired.
const renotice_interval = 1hr &redef;
## Represents a thing which is having metrics collected for it. An instance
## of this record type and a :bro:type:`Metrics::ID` together represent a
## single measurement.
type Index: record {
## Host is the value to which this metric applies.
host: addr &optional;
@@ -37,17 +43,30 @@ export {
network: subnet &optional;
} &log;
## The record type that is used for logging metrics.
type Info: record {
## Timestamp at which the metric was "broken".
ts: time &log;
## What measurement the metric represents.
metric_id: ID &log;
## The name of the filter being logged. :bro:type:`Metrics::ID` values
## can have multiple filters which represent different perspectives on
## the data so this is necessary to understand the value.
filter_name: string &log;
## What the metric value applies to.
index: Index &log;
## The simple numeric value of the metric.
value: count &log;
};
# TODO: configure a metrics filter logging stream to log the current
# metrics configuration in case someone is looking through
# old logs and the configuration has changed since then.
## Filters define how the data from a metric is aggregated and handled.
## Filters can be used to set how often the measurements are cut or "broken"
## and logged or how the data within them is aggregated. It's also
## possible to disable logging and use filters for thresholding.
type Filter: record {
## The :bro:type:`Metrics::ID` that this filter applies to.
id: ID &optional;
@@ -62,7 +81,7 @@ export {
aggregation_mask: count &optional;
## This is essentially a mapping table between addresses and subnets.
aggregation_table: table[subnet] of subnet &optional;
## The interval at which the metric should be "broken" and written
## The interval at which this filter should be "broken" and written
## to the logging stream. The counters are also reset to zero at
## this time so any threshold based detection needs to be set to a
## number that should be expected to happen within this period.
@@ -79,7 +98,7 @@ export {
notice_threshold: count &optional;
## A series of thresholds at which to generate notices.
notice_thresholds: vector of count &optional;
## How often this notice should be raised for this metric index. It
## How often this notice should be raised for this filter. It
## will be generated every time it crosses a threshold, but if the
## $break_interval is set to 5mins and this is set to 1hr the notice will
## only be generated once per hour even if something crosses the
@@ -87,15 +106,43 @@ export {
notice_freq: interval &optional;
};
## Function to associate a metric filter with a metric ID.
##
## id: The metric ID that the filter should be associated with.
##
## filter: The record representing the filter configuration.
global add_filter: function(id: ID, filter: Filter);
## Add data into a :bro:type:`Metrics::ID`. This should be called when
## a script has measured some point value and is ready to increment the
## counters.
##
## id: The metric ID that the data represents.
##
## index: The metric index that the value is to be added to.
##
## increment: How much to increment the counter by.
global add_data: function(id: ID, index: Index, increment: count);
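# A hypothetical usage sketch (the metric ID and interval are placeholders):
# count new connections per originator host.
#
#     redef enum Metrics::ID += { CONN_ATTEMPTS };
#
#     event bro_init()
#         {
#         Metrics::add_filter(CONN_ATTEMPTS, [$break_interval=5mins]);
#         }
#
#     event new_connection(c: connection)
#         {
#         Metrics::add_data(CONN_ATTEMPTS, [$host=c$id$orig_h], 1);
#         }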
## Helper function to represent a :bro:type:`Metrics::Index` value as
## a simple string.
##
## index: The metric index that is to be converted into a string.
##
## Returns: A string representation of the metric index.
global index2str: function(index: Index): string;
# This is the event that is used to "finish" metrics and adapt the metrics
# framework for clustered or non-clustered usage.
## Event that is used to "finish" metrics and adapt the metrics
## framework for clustered or non-clustered usage.
##
## .. note:: This is primarily intended for internal use.
global log_it: event(filter: Filter);
## Event to access metrics records as they are passed to the logging framework.
global log_metrics: event(rec: Info);
## Type to store a table of metrics values. Internal use only!
type MetricTable: table[Index] of count &default=0;
}
redef record Notice::Info += {
@@ -105,7 +152,6 @@ redef record Notice::Info += {
global metric_filters: table[ID] of vector of Filter = table();
global filter_store: table[ID, string] of Filter = table();
type MetricTable: table[Index] of count &default=0;
# This is indexed by metric ID and stream filter name.
global store: table[ID, string] of MetricTable = table() &default=table();

View file

@@ -31,6 +31,7 @@ export {
## Add a helper to the notice policy for looking up GeoIP data.
redef Notice::policy += {
[$pred(n: Notice::Info) = { return (n$note in Notice::lookup_location_types); },
$action = ACTION_ADD_GEODATA,
$priority = 10],
};
}

View file

@@ -11,8 +11,8 @@ module Notice;
export {
redef enum Action += {
## Indicate that the generated email should be addressed to the
## appropriate email addresses as found in the
## :bro:id:`Site::addr_to_emails` variable based on the relevant
## appropriate email addresses as found by the
## :bro:id:`Site::get_emails` function based on the relevant
## address or addresses indicated in the notice.
ACTION_EMAIL_ADMIN
};

View file

@@ -7,7 +7,7 @@ module Notice;
export {
redef enum Action += {
## Indicates that the notice should be sent to the pager email address
## configured in the :bro:id:`mail_page_dest` variable.
## configured in the :bro:id:`Notice::mail_page_dest` variable.
ACTION_PAGE
};

View file

@@ -10,18 +10,21 @@ module Notice;
export {
## Activate pretty-printed alarm summaries.
const pretty_print_alarms = T &redef;
## Address to send the pretty-printed reports to. Default if not set is
## :bro:id:`Notice::mail_dest`.
const mail_dest_pretty_printed = "" &redef;
## If an address from one of these networks is reported, we mark
## the entry with an addition quote symbol (that is, ">"). Many MUAs
## the entry with an additional quote symbol (i.e., ">"). Many MUAs
## then highlight such lines differently.
global flag_nets: set[subnet] &redef;
## Function that renders a single alarm. Can be overridden.
global pretty_print_alarm: function(out: file, n: Info) &redef;
## Force generating mail file, even if reading from traces or no mail
## destination is defined. This is mainly for testing.
global force_email_summaries = F &redef;
}
# We maintain an old-style file recording the pretty-printed alarms.
@@ -32,6 +35,9 @@ global pp_alarms_open: bool = F;
# Returns True if pretty-printed alarm summaries are activated.
function want_pp() : bool
{
if ( force_email_summaries )
return T;
return (pretty_print_alarms && ! reading_traces()
&& (mail_dest != "" || mail_dest_pretty_printed != ""));
}
@@ -41,38 +47,49 @@ function pp_open()
{
if ( pp_alarms_open )
return;
pp_alarms_open = T;
pp_alarms = open(pp_alarms_name);
local dest = mail_dest_pretty_printed != "" ? mail_dest_pretty_printed
: mail_dest;
local headers = email_headers("Alarm summary", dest);
write_file(pp_alarms, headers + "\n");
}
# Closes and mails out the current output file.
function pp_send()
function pp_send(rinfo: Log::RotationInfo)
{
if ( ! pp_alarms_open )
return;
write_file(pp_alarms, "\n\n--\n[Automatically generated]\n\n");
close(pp_alarms);
system(fmt("/bin/cat %s | %s -t -oi && /bin/rm %s",
pp_alarms_name, sendmail, pp_alarms_name));
pp_alarms_open = F;
local from = strftime("%H:%M:%S", rinfo$open);
local to = strftime("%H:%M:%S", rinfo$close);
local subject = fmt("Alarm summary from %s-%s", from, to);
local dest = mail_dest_pretty_printed != "" ? mail_dest_pretty_printed
: mail_dest;
if ( dest == "" )
# No mail destination configured, just leave the file alone. This is mainly for
# testing.
return;
local headers = email_headers(subject, dest);
local header_name = pp_alarms_name + ".tmp";
local header = open(header_name);
write_file(header, headers + "\n");
close(header);
system(fmt("/bin/cat %s %s | %s -t -oi && /bin/rm -f %s %s",
header_name, pp_alarms_name, sendmail, header_name, pp_alarms_name));
}
# Postprocessor function that triggers the email.
function pp_postprocessor(info: Log::RotationInfo): bool
{
if ( want_pp() )
pp_send();
pp_send(info);
return T;
}
@@ -80,7 +97,7 @@ event bro_init()
{
if ( ! want_pp() )
return;
# This replaces the standard non-pretty-printing filter.
Log::add_filter(Notice::ALARM_LOG,
[$name="alarm-mail", $writer=Log::WRITER_NONE,
@@ -92,13 +109,13 @@ event notice(n: Notice::Info) &priority=-5
{
if ( ! want_pp() )
return;
if ( ACTION_LOG !in n$actions )
if ( ACTION_ALARM !in n$actions )
return;
if ( ! pp_alarms_open )
pp_open();
pretty_print_alarm(pp_alarms, n);
}
@@ -108,12 +125,12 @@ function do_msg(out: file, n: Info, line1: string, line2: string, line3: string,
@ifdef ( Notice::ACTION_ADD_GEODATA ) # Make tests happy, cyclic dependency.
if ( n?$remote_location && n$remote_location?$country_code )
country = fmt(" (remote location %s)", n$remote_location$country_code);
@endif
line1 = cat(line1, country);
local resolved = "";
if ( host1 != 0.0.0.0 )
resolved = fmt("%s # %s = %s", resolved, host1, name1);
@@ -133,64 +150,64 @@ function do_msg(out: file, n: Info, line1: string, line2: string, line3: string,
function pretty_print_alarm(out: file, n: Info)
{
local pdescr = "";
@if ( Cluster::is_enabled() )
pdescr = "local";
if ( n?$src_peer )
pdescr = n$src_peer?$descr ? n$src_peer$descr : fmt("%s", n$src_peer$host);
pdescr = fmt("<%s> ", pdescr);
@endif
local msg = fmt( "%s%s", pdescr, n$msg);
local who = "";
local h1 = 0.0.0.0;
local h2 = 0.0.0.0;
local orig_p = "";
local resp_p = "";
if ( n?$id )
{
orig_p = fmt(":%s", n$id$orig_p);
resp_p = fmt(":%s", n$id$resp_p);
h1 = n$id$orig_h;
h2 = n$id$resp_h;
who = fmt("%s:%s -> %s:%s", h1, n$id$orig_p, h2, n$id$resp_p);
}
if ( n?$src && n?$dst )
else if ( n?$src && n?$dst )
{
h1 = n$src;
h2 = n$dst;
who = fmt("%s%s -> %s%s", h1, orig_p, h2, resp_p);
if ( n?$uid )
who = fmt("%s (uid %s)", who, n$uid );
who = fmt("%s -> %s", h1, h2);
}
else if ( n?$src )
{
local p = "";
if ( n?$p )
p = fmt(":%s", n$p);
h1 = n$src;
who = fmt("%s%s", h1, p);
who = fmt("%s%s", h1, (n?$p ? fmt(":%s", n$p) : ""));
}
if ( n?$uid )
who = fmt("%s (uid %s)", who, n$uid );
local flag = (h1 in flag_nets || h2 in flag_nets);
local line1 = fmt(">%s %D %s %s", (flag ? ">" : " "), network_time(), n$note, who);
local line2 = fmt(" %s", msg);
local line3 = n?$sub ? fmt(" %s", n$sub) : "";
if ( h1 == 0.0.0.0 )
{
do_msg(out, n, line1, line2, line3, h1, "", h2, "");
return;
}
if ( reading_traces() )
{
do_msg(out, n, line1, line2, line3, h1, "<skipped>", h2, "<skipped>");
return;
}
when ( local h1name = lookup_addr(h1) )
{
if ( h2 == 0.0.0.0 )

View file

@@ -1,32 +1,52 @@
##! Loading this script extends the :bro:enum:`Notice::ACTION_EMAIL` action
##! by appending to the email the hostnames associated with
##! :bro:type:`Notice::Info`'s *src* and *dst* fields as determined by a
##! DNS lookup.
@load ../main
module Notice;
# This probably doesn't actually work due to the async lookup_addr.
# We have to store references to the notices here because the when statement
# clones the frame which doesn't give us access to modify values outside
# of it's execution scope. (we get a clone of the notice instead of a
# reference to the original notice)
global tmp_notice_storage: table[string] of Notice::Info &create_expire=max_email_delay+10secs;
event Notice::notice(n: Notice::Info) &priority=10
{
if ( ! n?$src && ! n?$dst )
return;
# This should only be done for notices that are being sent to email.
if ( ACTION_EMAIL !in n$actions )
return;
# I'm not recovering gracefully from the when statements because I want
# the notice framework to detect that something has exceeded the maximum
# allowed email delay and tell the user.
local uid = unique_id("");
tmp_notice_storage[uid] = n;
local output = "";
if ( n?$src )
{
add n$email_delay_tokens["hostnames-src"];
when ( local src_name = lookup_addr(n$src) )
{
output = string_cat("orig_h/src hostname: ", src_name, "\n");
n$email_body_sections[|n$email_body_sections|] = output;
output = string_cat("orig/src hostname: ", src_name, "\n");
tmp_notice_storage[uid]$email_body_sections[|tmp_notice_storage[uid]$email_body_sections|] = output;
delete tmp_notice_storage[uid]$email_delay_tokens["hostnames-src"];
}
}
if ( n?$dst )
{
add n$email_delay_tokens["hostnames-dst"];
when ( local dst_name = lookup_addr(n$dst) )
{
output = string_cat("resp_h/dst hostname: ", dst_name, "\n");
n$email_body_sections[|n$email_body_sections|] = output;
output = string_cat("resp/dst hostname: ", dst_name, "\n");
tmp_notice_storage[uid]$email_body_sections[|tmp_notice_storage[uid]$email_body_sections|] = output;
delete tmp_notice_storage[uid]$email_delay_tokens["hostnames-dst"];
}
}
}

View file

@@ -7,9 +7,9 @@
module Notice;
export {
redef enum Log::ID += {
## This is the primary logging stream for notices.
LOG,
## This is the notice policy auditing log. It records what the current
## notice policy is at Bro init time.
POLICY_LOG,
@@ -17,7 +17,7 @@ export {
ALARM_LOG,
};
## Scripts creating new notices need to redef this enum to add their own
## specific notice types which would then get used when they call the
## :bro:id:`NOTICE` function. The convention is to give a general category
## along with the specific notice separating words with underscores and
@@ -28,14 +28,14 @@ export {
## Notice reporting a count of how often a notice occurred.
Tally,
};
## These are values representing actions that can be taken with notices.
type Action: enum {
## Indicates that there is no action to be taken.
ACTION_NONE,
## Indicates that the notice should be sent to the notice logging stream.
ACTION_LOG,
## Indicates that the notice should be sent to the email address(es)
## configured in the :bro:id:`Notice::mail_dest` variable.
ACTION_EMAIL,
## Indicates that the notice should be alarmed. A readable ASCII
@@ -46,12 +46,12 @@ export {
## duplicate notice suppression that the notice framework does.
ACTION_NO_SUPPRESS,
};
## The notice framework is able to do automatic notice suppression by
## utilizing the $identifier field in :bro:type:`Notice::Info` records.
## Set this to "0secs" to completely disable automated notice suppression.
const default_suppression_interval = 1hrs &redef;
type Info: record {
## An absolute time indicating when the notice occurred, defaults
## to the current network time.
@@ -73,14 +73,18 @@ export {
## reference to the actual connection will be deleted after applying
## the notice policy.
iconn: icmp_conn &optional;
## The type of the notice.
## The transport protocol. Filled automatically when either conn, iconn
## or p is specified.
proto: transport_proto &log &optional;
## The :bro:type:`Notice::Type` of the notice.
note: Type &log;
## The human readable message for the notice.
msg: string &log &optional;
## The human readable sub-message.
sub: string &log &optional;
## Source address, if we don't have a :bro:type:`conn_id`.
src: addr &log &optional;
## Destination address.
@@ -89,33 +93,39 @@ export {
p: port &log &optional;
## Associated count, or perhaps a status code.
n: count &log &optional;
## Peer that raised this notice.
src_peer: event_peer &optional;
## Textual description for the peer that raised this notice.
peer_descr: string &log &optional;
## The actions which have been applied to this notice.
actions: set[Notice::Action] &log &optional;
## These are policy items that returned T and applied their action
## to the notice.
policy_items: set[count] &log &optional;
## By adding chunks of text into this element, other scripts can
## expand on notices that are being emailed. The normal way to add text
## is to extend the vector by handling the :bro:id:`Notice::notice`
## event and modifying the notice in place.
email_body_sections: vector of string &default=vector();
email_body_sections: vector of string &optional;
## Adding a string "token" to this set will cause the notice framework's
## built-in emailing functionality to delay sending the email until
## either the token has been removed or the email has been delayed
## for :bro:id:`Notice::max_email_delay`.
email_delay_tokens: set[string] &optional;
## This field is to be provided when a notice is generated for the
## purpose of deduplicating notices. The identifier string should
## be unique for a single instance of the notice. This field should be
## filled out in almost all cases when generating notices to define
## when a notice is conceptually a duplicate of a previous notice.
##
## For example, an SSL certificate that is going to expire soon should
## always have the same identifier no matter the client IP address
## that connected and resulted in the certificate being exposed. In
## this case, the resp_h, resp_p, and hash of the certificate would be
## used to create this value. The hash of the cert is included
@@ -124,19 +134,19 @@ export {
## Another example might be a host downloading a file which triggered
## a notice because the MD5 sum of the file it downloaded was known
## by some set of intelligence. In that case, the orig_h (client)
## and MD5 sum would be used in this field to dedup because if the
## same file is downloaded over and over again you really only want to
## know about it a single time. This makes it possible to send those
## notices to email without worrying so much about sending thousands
## of emails.
identifier: string &optional;
## This field indicates the length of time that this
## unique notice should be suppressed. This field is automatically
## filled out and should not be written to by any other script.
suppress_for: interval &log &optional;
};
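# A hypothetical sketch (the note type and message are placeholders): the
# identifier below makes repeated notices for the same server/port pair
# dedup under the suppression rules described above.
#
#     NOTICE([$note=SSL::Certificate_Expired, $conn=c,
#             $msg="certificate expired",
#             $identifier=cat(c$id$resp_h, c$id$resp_p)]);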
## Ignored notice types.
const ignored_types: set[Notice::Type] = {} &redef;
## Emailed notice types.
@@ -145,10 +155,10 @@ export {
const alarmed_types: set[Notice::Type] = {} &redef;
## Types that should be suppressed for the default suppression interval.
const not_suppressed_types: set[Notice::Type] = {} &redef;
## This table can be used as a shorthand way to modify suppression
## intervals for entire notice types.
const type_suppression_intervals: table[Notice::Type] of interval = {} &redef;
## This is the record that defines the items that make up the notice policy.
type PolicyItem: record {
## This is the exact positional order in which the
@@ -160,13 +170,13 @@ export {
priority: count &log &default=5;
## An action given to the notice if the predicate returns true.
action: Notice::Action &log &default=ACTION_NONE;
## The pred (predicate) field is a function that returns a boolean T
## or F value. If the predicate function returns true, the action in
## this record is applied to the notice that is given as an argument
## to the predicate function. If no predicate is supplied, it's
## assumed that the PolicyItem always applies.
pred: function(n: Notice::Info): bool &log &optional;
## Indicates this item should terminate policy processing if the
## predicate returns T.
halt: bool &log &default=F;
## This defines the length of time that this particular notice should
@@ -188,33 +198,35 @@ export {
[$pred(n: Notice::Info) = { return (n$note in Notice::emailed_types); },
$action = ACTION_EMAIL,
$priority = 8],
[$pred(n: Notice::Info) = {
if (n$note in Notice::type_suppression_intervals)
{
n$suppress_for=Notice::type_suppression_intervals[n$note];
return T;
}
return F;
},
$action = ACTION_NONE,
$priority = 8],
[$action = ACTION_LOG,
$priority = 0],
} &redef;
## Local system sendmail program.
const sendmail = "/usr/sbin/sendmail" &redef;
## Email address to send notices with the :bro:enum:`Notice::ACTION_EMAIL`
## action or to send bulk alarm logs on rotation with
## :bro:enum:`Notice::ACTION_ALARM`.
const mail_dest = "" &redef;
## Address that emails will be from.
const mail_from = "Big Brother <bro@localhost>" &redef;
## Reply-to address used in outbound email.
const reply_to = "" &redef;
## Text string prefixed to the subject of all emails sent out.
const mail_subject_prefix = "[Bro]" &redef;
## The maximum amount of time a plugin can delay email from being sent.
const max_email_delay = 15secs &redef;
## A log postprocessing function that implements emailing the contents
## of a log upon rotation to any configured :bro:id:`Notice::mail_dest`.
@@ -225,8 +237,8 @@ export {
## Returns: True.
global log_mailing_postprocessor: function(info: Log::RotationInfo): bool;
## This is the event that is called as the entry point to the
## notice framework by the global :bro:id:`NOTICE` function. By the time
## this event is generated, default values have already been filled out in
## the :bro:type:`Notice::Info` record and synchronous functions in the
## :bro:id:`Notice::sync_functions` have already been called. The notice
@@ -235,19 +247,19 @@ export {
## n: The record containing notice data.
global notice: event(n: Info);
## This is a set of functions that provide a synchronous way for scripts
## extending the notice framework to run before the normal event based
## notice pathway that most of the notice framework takes. This is helpful
## in cases where an action against a notice needs to happen immediately
## and can't wait the short time for the event to bubble up to the top of
## the event queue. An example is the IP address dropping script that
## can block IP addresses that have notices generated because it
## needs to operate closer to real time than the event queue allows it to.
## Normally the event based extension model using the
## :bro:id:`Notice::notice` event will work fine if there aren't harder
## real time constraints.
const sync_functions: set[function(n: Notice::Info)] = set() &redef;
## This event is generated when a notice begins to be suppressed.
##
## n: The record containing notice data regarding the notice type
@@ -265,7 +277,7 @@ export {
## n: The record containing notice data regarding the notice type
## that was being suppressed.
global end_suppression: event(n: Notice::Info);
## Call this function to send a notice in an email. It is already used
## by default with the built in :bro:enum:`Notice::ACTION_EMAIL` and
## :bro:enum:`Notice::ACTION_PAGE` actions.
@@ -307,22 +319,22 @@ function per_notice_suppression_interval(t: table[Notice::Type, string] of Notic
local n: Notice::Type;
local s: string;
[n,s] = idx;
local suppress_time = t[n,s]$suppress_for - (network_time() - t[n,s]$ts);
if ( suppress_time < 0secs )
suppress_time = 0secs;
# If there is no more suppression time left, the notice needs to be sent
# to the end_suppression event.
if ( suppress_time == 0secs )
event Notice::end_suppression(t[n,s]);
return suppress_time;
}
# This is the internally maintained notice suppression table. It's
# indexed on the Notice::Type and the $identifier field from the notice.
global suppressing: table[Type, string] of Notice::Info = {}
&create_expire=0secs
&expire_func=per_notice_suppression_interval;
@@ -349,7 +361,7 @@ function log_mailing_postprocessor(info: Log::RotationInfo): bool
event bro_init() &priority=5
{
Log::create_stream(Notice::LOG, [$columns=Info, $ev=log_notice]);
Log::create_stream(Notice::ALARM_LOG, [$columns=Notice::Info]);
# If Bro is configured for mailing notices, set up mailing for alarms.
# Make sure that this alarm log is also output as text so that it can
@@ -390,25 +402,49 @@ function email_headers(subject_desc: string, dest: string): string
return header_text;
}
event delay_sending_email(n: Notice::Info, dest: string, extend: bool)
{
email_notice_to(n, dest, extend);
}
function email_notice_to(n: Notice::Info, dest: string, extend: bool)
{
if ( reading_traces() || dest == "" )
return;
if ( extend )
{
if ( |n$email_delay_tokens| > 0 )
{
# If we still are within the max_email_delay, keep delaying.
if ( n$ts + max_email_delay > network_time() )
{
schedule 1sec { delay_sending_email(n, dest, extend) };
return;
}
else
{
event reporter_info(network_time(),
fmt("Notice email delay tokens weren't released in time (%s).", n$email_delay_tokens),
"");
}
}
}
local email_text = email_headers(fmt("%s", n$note), dest);
# First off, finish the headers and include the human readable messages
# then leave a blank line after the message.
email_text = string_cat(email_text, "\nMessage: ", n$msg);
if ( n?$sub )
email_text = string_cat(email_text, "\nSub-message: ", n$sub);
email_text = string_cat(email_text, "\n\n");
# Next, add information about the connection if it exists.
if ( n?$id )
{
email_text = string_cat(email_text, "Connection: ",
email_text = string_cat(email_text, "Connection: ",
fmt("%s", n$id$orig_h), ":", fmt("%d", n$id$orig_p), " -> ",
fmt("%s", n$id$resp_h), ":", fmt("%d", n$id$resp_p), "\n");
if ( n?$uid )
@@ -416,17 +452,18 @@ function email_notice_to(n: Notice::Info, dest: string, extend: bool)
}
else if ( n?$src )
email_text = string_cat(email_text, "Address: ", fmt("%s", n$src), "\n");
# Add the extended information if it's requested.
if ( extend )
{
email_text = string_cat(email_text, "\nEmail Extensions\n");
email_text = string_cat(email_text, "----------------\n");
for ( i in n$email_body_sections )
{
email_text = string_cat(email_text, "******************\n");
email_text = string_cat(email_text, n$email_body_sections[i], "\n");
}
}
email_text = string_cat(email_text, "\n\n--\n[Automatically generated]\n\n");
piped_exec(fmt("%s -t -oi", sendmail), email_text);
}
@@ -439,10 +476,10 @@ event notice(n: Notice::Info) &priority=-5
Log::write(Notice::LOG, n);
if ( ACTION_ALARM in n$actions )
Log::write(Notice::ALARM_LOG, n);
# Normally suppress further notices like this one unless directed not to.
# n$identifier *must* be specified for suppression to function at all.
if ( n?$identifier &&
ACTION_NO_SUPPRESS !in n$actions &&
[n$note, n$identifier] !in suppressing &&
n$suppress_for != 0secs )
@@ -465,7 +502,7 @@ function is_being_suppressed(n: Notice::Info): bool
else
return F;
}
# Executes a script with all of the notice fields put into the
# new process' environment as "BRO_ARG_<field>" variables.
function execute_with_notice(cmd: string, n: Notice::Info)
@@ -474,9 +511,9 @@ function execute_with_notice(cmd: string, n: Notice::Info)
#local tgs = tags(n);
#system_env(cmd, tags);
}
# This is run synchronously as a function before all of the other
# notice related functions and events. It also modifies the
# :bro:type:`Notice::Info` record in place.
function apply_policy(n: Notice::Info)
{
@@ -491,7 +528,7 @@ function apply_policy(n: Notice::Info)
if ( ! n?$uid )
n$uid = n$conn$uid;
}
if ( n?$id )
{
if ( ! n?$src )
@@ -502,8 +539,12 @@ function apply_policy(n: Notice::Info)
n$p = n$id$resp_p;
}
if ( n?$p )
n$proto = get_port_transport_proto(n$p);
if ( n?$iconn )
{
n$proto = icmp;
if ( ! n?$src )
n$src = n$iconn$orig_h;
if ( ! n?$dst )
@@ -513,15 +554,20 @@ function apply_policy(n: Notice::Info)
if ( ! n?$src_peer )
n$src_peer = get_event_peer();
if ( ! n?$peer_descr )
n$peer_descr = n$src_peer?$descr ?
n$src_peer$descr : fmt("%s", n$src_peer$host);
if ( ! n?$actions )
n$actions = set();
if ( ! n?$email_body_sections )
n$email_body_sections = vector();
if ( ! n?$email_delay_tokens )
n$email_delay_tokens = set();
if ( ! n?$policy_items )
n$policy_items = set();
for ( i in ordered_policy )
{
# If there's no predicate or the predicate returns F.
@@ -529,51 +575,51 @@ function apply_policy(n: Notice::Info)
{
add n$actions[ordered_policy[i]$action];
add n$policy_items[int_to_count(i)];
# If the predicate matched and there was a suppression interval,
# apply it to the notice now.
if ( ordered_policy[i]?$suppress_for )
n$suppress_for = ordered_policy[i]$suppress_for;
# If the policy item wants to halt policy processing, do it now!
if ( ordered_policy[i]$halt )
break;
}
}
# Apply the suppression time after applying the policy so that policy
# items can give custom suppression intervals. If there is no
# suppression interval given yet, the default is applied.
if ( ! n?$suppress_for )
n$suppress_for = default_suppression_interval;
# Delete the connection record if it's there so we aren't sending that
# to remote machines. It can cause problems due to the size of the
# connection record.
if ( n?$conn )
delete n$conn;
if ( n?$iconn )
delete n$iconn;
}
# Create the ordered notice policy automatically which will be used at runtime
# for prioritized matching of the notice policy.
event bro_init() &priority=10
{
# Create the policy log here because it's only written to in this handler.
Log::create_stream(Notice::POLICY_LOG, [$columns=PolicyItem]);
local tmp: table[count] of set[PolicyItem] = table();
for ( pi in policy )
{
if ( pi$priority < 0 || pi$priority > 10 )
Reporter::fatal("All Notice::PolicyItem priorities must be within 0 and 10");
if ( pi$priority !in tmp )
tmp[pi$priority] = set();
add tmp[pi$priority][pi];
}
local rev_count = vector(10,9,8,7,6,5,4,3,2,1,0);
for ( i in rev_count )
{
@@ -589,7 +635,7 @@ event bro_init() &priority=10
}
}
}
function internal_NOTICE(n: Notice::Info)
{
# Suppress this notice if necessary.

View file

@@ -1,5 +1,5 @@
##! This script provides the framework for software version detection and
##! parsing, but doesn't actually do any detection on its own. It relies on
##! parsing but doesn't actually do any detection on its own. It relies on
##! other protocol specific scripts to parse out software from the protocols
##! that they analyze. The entry point for providing new software detections
##! to this framework is through the :bro:id:`Software::found` function.
@@ -10,39 +10,44 @@
module Software;
export {
## The software logging stream identifier.
redef enum Log::ID += { LOG };
## Scripts detecting new types of software need to redef this enum to add
## their own specific software types which would then be used when they
## create :bro:type:`Software::Info` records.
type Type: enum {
## A placeholder type for when the type of software is not known.
UNKNOWN,
OPERATING_SYSTEM,
DATABASE_SERVER,
# There are a number of ways to detect printers on the
# network, we just need to codify them in a script and move
# this out of here. It isn't currently used for anything.
PRINTER,
};
## A structure to represent the numeric version of software.
type Version: record {
major: count &optional; ##< Major version number
minor: count &optional; ##< Minor version number
minor2: count &optional; ##< Minor subversion number
addl: string &optional; ##< Additional version string (e.g. "beta42")
## Major version number
major: count &optional;
## Minor version number
minor: count &optional;
## Minor subversion number
minor2: count &optional;
## Additional version string (e.g. "beta42")
addl: string &optional;
} &log;
## The record type that is used for representing and logging software.
type Info: record {
## The time at which the software was first detected.
## The time at which the software was detected.
ts: time &log;
## The IP address detected running the software.
host: addr &log;
## The type of software detected (e.g. WEB_SERVER)
## The type of software detected (e.g. :bro:enum:`HTTP::SERVER`).
software_type: Type &log &default=UNKNOWN;
## Name of the software (e.g. Apache)
## Name of the software (e.g. Apache).
name: string &log;
## Version of the software
## Version of the software.
version: Version &log;
## The full unparsed version string found because the version parsing
## doesn't work 100% reliably and this acts as a fall back in the logs.
## doesn't always work reliably in all cases and this acts as a
## fallback in the logs.
unparsed_version: string &log &optional;
## This can indicate that this software being detected should
@@ -55,37 +60,48 @@ export {
force_log: bool &default=F;
};
## The hosts whose software should be detected and tracked.
## Hosts whose software should be detected and tracked.
## Choices are: LOCAL_HOSTS, REMOTE_HOSTS, ALL_HOSTS, NO_HOSTS
const asset_tracking = LOCAL_HOSTS &redef;
## Other scripts should call this function when they detect software.
## unparsed_version: This is the full string from which the
## :bro:type:`Software::Info` was extracted.
##
## id: The connection id where the software was discovered.
##
## info: A record representing the software discovered.
##
## Returns: T if the software was logged, F otherwise.
global found: function(id: conn_id, info: Software::Info): bool;
## This function can take many software version strings and parse them
## Take many common software version strings and parse them
## into a sensible :bro:type:`Software::Version` record. There are
## still many cases where scripts may have to have their own specific
## version parsing though.
##
## unparsed_version: The raw version string.
##
## host: The host where the software was discovered.
##
## software_type: The type of software.
##
## Returns: A complete record ready for the :bro:id:`Software::found` function.
global parse: function(unparsed_version: string,
host: addr,
software_type: Type): Info;
## Compare two versions.
## Compare two version records.
##
## Returns: -1 for v1 < v2, 0 for v1 == v2, 1 for v1 > v2.
## If the numerical version numbers match, the addl string
## is compared lexicographically.
global cmp_versions: function(v1: Version, v2: Version): int;
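# A hypothetical usage sketch (the version string is a placeholder),
# parsing a banner and recording it for the server of connection ``c``:
#
#     local si = Software::parse("Apache/2.2.21 (Unix)", c$id$resp_h,
#                                HTTP::SERVER);
#     Software::found(c$id, si);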
## This type represents a set of software. It's used by the
## :bro:id:`tracked` variable to store all known pieces of software
## for a particular host. It's indexed with the name of a piece of
## software such as "Firefox" and it yields a
## :bro:type:`Software::Info` record with more information about the
## software.
## Type to represent a collection of :bro:type:`Software::Info` records.
## It's indexed with the name of a piece of software such as "Firefox"
## and it yields a :bro:type:`Software::Info` record with more information
## about the software.
type SoftwareSet: table[string] of Info;
## The set of software associated with an address. Data expires from

File diff suppressed because it is too large

View file

@@ -1,23 +1,27 @@
##! This script can be used to extract either the originator's data or the
##! responder's data or both. By default nothing is extracted, and in order
##! to actually extract data the ``c$extract_orig`` and/or the
##! ``c$extract_resp`` variable must be set to T. One way to achieve this
##! would be to handle the connection_established event elsewhere and set the
##! extract_orig and extract_resp options there. However, there may be trouble
##! with the timing due the event queue delay.
##! This script does not work well in a cluster context unless it has a
##! remotely mounted disk to write the content files to.
##! ``c$extract_resp`` variable must be set to ``T``. One way to achieve this
##! would be to handle the :bro:id:`connection_established` event elsewhere
##! and set the ``extract_orig`` and ``extract_resp`` options there.
##! However, there may be trouble with the timing due to event queue delay.
##!
##! .. note::
##!
##! This script does not work well in a cluster context unless it has a
##! remotely mounted disk to write the content files to.
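##!
##! A minimal sketch of such a handler (subject to the timing caveat
##! above)::
##!
##!     event connection_established(c: connection)
##!         {
##!         c$extract_orig = T;
##!         c$extract_resp = T;
##!         }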
@load base/utils/files
module Conn;
export {
## The prefix given to files as they are opened on disk.
## The prefix given to files containing extracted connections as they are
## opened on disk.
const extraction_prefix = "contents" &redef;
## If this variable is set to T, then all contents of all files will be
## extracted.
## If this variable is set to ``T``, then all contents of all connections
## will be extracted.
const default_extract = F &redef;
}

View file

@@ -4,7 +4,7 @@
module Conn;
export {
## Define inactivty timeouts by the service detected being used over
## Define inactivity timeouts by the service detected being used over
## the connection.
const analyzer_inactivity_timeouts: table[AnalyzerTag] of interval = {
# For interactive services, allow longer periods of inactivity.

View file

@@ -1,17 +1,33 @@
##! This script manages the tracking/logging of general information regarding
##! TCP, UDP, and ICMP traffic. For UDP and ICMP, "connections" are to
##! be interpreted using flow semantics (sequence of packets from a source
##! host/port to a destination host/port). Further, ICMP "ports" are to
##! be interpreted as the source port meaning the ICMP message type and
##! the destination port being the ICMP message code.
@load base/utils/site
module Conn;
export {
## The connection logging stream identifier.
redef enum Log::ID += { LOG };
## The record type which contains column fields of the connection log.
type Info: record {
## This is the time of the first packet.
ts: time &log;
## A unique identifier of a connection.
uid: string &log;
## The connection's 4-tuple of endpoint addresses/ports.
id: conn_id &log;
## The transport layer protocol of the connection.
proto: transport_proto &log;
## An identification of an application protocol being sent over
## the connection.
service: string &log &optional;
## How long the connection lasted. For 3-way or 4-way connection
## tear-downs, this will not include the final ACK.
duration: interval &log &optional;
## The number of payload bytes the originator sent. For TCP
## this is taken from sequence numbers and might be inaccurate
@@ -51,8 +67,8 @@ export {
## have been completed prior to the packet loss.
missed_bytes: count &log &default=0;
## Records the state history of (TCP) connections as
## a string of letters.
## Records the state history of connections as a string of letters.
## For TCP connections the meaning of those letters is:
##
## ====== ====================================================
## Letter Meaning
@@ -71,7 +87,8 @@ export {
## originator and lower case then means the responder.
## Also, there is compression. We only record one "d" in each direction,
## for instance. I.e., we just record that data went in that direction.
## This history is not meant to encode how much data that happened to be.
## This history is not meant to encode how much data that happened to
## be.
history: string &log &optional;
## Number of packets the originator sent.
## Only set if :bro:id:`use_conn_size_analyzer` = T
@@ -85,7 +102,9 @@ export {
## Number of IP level bytes the responder sent. See ``orig_pkts``.
resp_ip_bytes: count &log &optional;
};
## Event that can be handled to access the :bro:type:`Conn::Info`
## record as it is sent on to the logging framework.
global log_conn: event(rec: Info);
}
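
A sketch of a handler for this event, e.g. to flag long-lived connections as they are logged (the one-hour threshold is illustrative):

event Conn::log_conn(rec: Conn::Info)
	{
	if ( rec?$duration && rec$duration > 1hr )
		print fmt("long-lived %s connection %s", rec$proto, rec$uid);
	}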

View file

@ -4,9 +4,9 @@
module DNS;
export {
const PTR = 12;
const EDNS = 41;
const ANY = 255;
const PTR = 12; ##< RR TYPE value for a domain name pointer.
const EDNS = 41; ##< An OPT RR TYPE value described by EDNS.
const ANY = 255; ##< A QTYPE value describing a request for all records.
## Mapping of DNS query type codes to human readable string representation.
const query_types = {
@ -29,50 +29,43 @@ export {
[ANY] = "*",
} &default = function(n: count): string { return fmt("query-%d", n); };
const code_types = {
[0] = "X0",
[1] = "Xfmt",
[2] = "Xsrv",
[3] = "Xnam",
[4] = "Ximp",
[5] = "X[",
} &default="?";
## Errors used for non-TSIG/EDNS types.
const base_errors = {
[0] = "NOERROR", ##< No Error
[1] = "FORMERR", ##< Format Error
[2] = "SERVFAIL", ##< Server Failure
[3] = "NXDOMAIN", ##< Non-Existent Domain
[4] = "NOTIMP", ##< Not Implemented
[5] = "REFUSED", ##< Query Refused
[6] = "YXDOMAIN", ##< Name Exists when it should not
[7] = "YXRRSET", ##< RR Set Exists when it should not
[8] = "NXRRSet", ##< RR Set that should exist does not
[9] = "NOTAUTH", ##< Server Not Authoritative for zone
[10] = "NOTZONE", ##< Name not contained in zone
[11] = "unassigned-11", ##< available for assignment
[12] = "unassigned-12", ##< available for assignment
[13] = "unassigned-13", ##< available for assignment
[14] = "unassigned-14", ##< available for assignment
[15] = "unassigned-15", ##< available for assignment
[16] = "BADVERS", ##< for EDNS, collision w/ TSIG
[17] = "BADKEY", ##< Key not recognized
[18] = "BADTIME", ##< Signature out of time window
[19] = "BADMODE", ##< Bad TKEY Mode
[20] = "BADNAME", ##< Duplicate key name
[21] = "BADALG", ##< Algorithm not supported
[22] = "BADTRUNC", ##< draft-ietf-dnsext-tsig-sha-05.txt
[3842] = "BADSIG", ##< 16 <= number collision with EDNS(16);
##< this is a translation from TSIG(16)
[0] = "NOERROR", # No Error
[1] = "FORMERR", # Format Error
[2] = "SERVFAIL", # Server Failure
[3] = "NXDOMAIN", # Non-Existent Domain
[4] = "NOTIMP", # Not Implemented
[5] = "REFUSED", # Query Refused
[6] = "YXDOMAIN", # Name Exists when it should not
[7] = "YXRRSET", # RR Set Exists when it should not
[8] = "NXRRSet", # RR Set that should exist does not
[9] = "NOTAUTH", # Server Not Authoritative for zone
[10] = "NOTZONE", # Name not contained in zone
[11] = "unassigned-11", # available for assignment
[12] = "unassigned-12", # available for assignment
[13] = "unassigned-13", # available for assignment
[14] = "unassigned-14", # available for assignment
[15] = "unassigned-15", # available for assignment
[16] = "BADVERS", # for EDNS, collision w/ TSIG
[17] = "BADKEY", # Key not recognized
[18] = "BADTIME", # Signature out of time window
[19] = "BADMODE", # Bad TKEY Mode
[20] = "BADNAME", # Duplicate key name
[21] = "BADALG", # Algorithm not supported
[22] = "BADTRUNC", # draft-ietf-dnsext-tsig-sha-05.txt
[3842] = "BADSIG", # 16 <= number collision with EDNS(16);
# this is a translation from TSIG(16)
} &default = function(n: count): string { return fmt("rcode-%d", n); };
# This deciphers EDNS Z field values.
## This deciphers EDNS Z field values.
const edns_zfield = {
[0] = "NOVALUE", # regular entry
[32768] = "DNS_SEC_OK", # accepts DNS Sec RRs
} &default="?";
## Possible values of the CLASS field in resource records or QCLASS field
## in query messages.
const classes = {
[1] = "C_INTERNET",
[2] = "C_CSNET",
@ -81,4 +74,4 @@ export {
[254] = "C_NONE",
[255] = "C_ANY",
} &default = function(n: count): string { return fmt("qclass-%d", n); };
}
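
A quick sketch of how these lookup tables resolve codes to names; unknown codes fall through to the table's &default function:

event bro_init()
	{
	# Prints "NXDOMAIN", then "query-12345" via the default function.
	print DNS::base_errors[3];
	print DNS::query_types[12345];
	}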

View file

@ -1,54 +1,106 @@
##! Base DNS analysis script which tracks and logs DNS queries along with
##! their responses.
@load ./consts
module DNS;
export {
## The DNS logging stream identifier.
redef enum Log::ID += { LOG };
## The record type which contains the column fields of the DNS log.
type Info: record {
ts: time &log;
uid: string &log;
id: conn_id &log;
proto: transport_proto &log;
trans_id: count &log &optional;
query: string &log &optional;
qclass: count &log &optional;
qclass_name: string &log &optional;
qtype: count &log &optional;
qtype_name: string &log &optional;
rcode: count &log &optional;
rcode_name: string &log &optional;
QR: bool &log &default=F;
AA: bool &log &default=F;
TC: bool &log &default=F;
RD: bool &log &default=F;
RA: bool &log &default=F;
Z: count &log &default=0;
TTL: interval &log &optional;
answers: set[string] &log &optional;
## This value indicates if this request/response pair is ready to be logged.
## The earliest time at which a DNS protocol message over the
## associated connection is observed.
ts: time &log;
## A unique identifier of the connection over which DNS messages
## are being transferred.
uid: string &log;
## The connection's 4-tuple of endpoint addresses/ports.
id: conn_id &log;
## The transport layer protocol of the connection.
proto: transport_proto &log;
## A 16 bit identifier assigned by the program that generated the
## DNS query. Also used in responses to match up replies to
## outstanding queries.
trans_id: count &log &optional;
## The domain name that is the subject of the DNS query.
query: string &log &optional;
## The QCLASS value specifying the class of the query.
qclass: count &log &optional;
## A descriptive name for the class of the query.
qclass_name: string &log &optional;
## A QTYPE value specifying the type of the query.
qtype: count &log &optional;
## A descriptive name for the type of the query.
qtype_name: string &log &optional;
## The response code value in DNS response messages.
rcode: count &log &optional;
## A descriptive name for the response code value.
rcode_name: string &log &optional;
## Whether the message is a query (F) or response (T).
QR: bool &log &default=F;
## The Authoritative Answer bit for response messages specifies that
## the responding name server is an authority for the domain name
## in the question section.
AA: bool &log &default=F;
## The Truncation bit specifies that the message was truncated.
TC: bool &log &default=F;
## The Recursion Desired bit indicates to a name server to recursively
## pursue the query.
RD: bool &log &default=F;
## The Recursion Available bit in a response message indicates if
## the name server supports recursive queries.
RA: bool &log &default=F;
## A reserved field that is currently supposed to be zero in all
## queries and responses.
Z: count &log &default=0;
## The set of resource descriptions in answer of the query.
answers: vector of string &log &optional;
## The caching intervals of the associated RRs described by the
## ``answers`` field.
TTLs: vector of interval &log &optional;
## This value indicates if this request/response pair is ready to be
## logged.
ready: bool &default=F;
## The total number of resource records in a reply message's answer
## section.
total_answers: count &optional;
## The total number of resource records in a reply message's answer,
## authority, and additional sections.
total_replies: count &optional;
};
## A record type which tracks the status of DNS queries for a given
## :bro:type:`connection`.
type State: record {
## Indexed by query id, returns Info record corresponding to
## query/response which haven't completed yet.
pending: table[count] of Info &optional;
## This is the list of DNS responses that have completed based on the
## number of responses declared and the number received. The contents
## of the set are transaction IDs.
finished_answers: set[count] &optional;
};
## An event that can be handled to access the :bro:type:`DNS::Info`
## record as it is sent to the logging framework.
global log_dns: event(rec: Info);
## This is called by the specific dns_*_reply events with a "reply" which
## may not represent the full data available from the resource record, but
## it's generally considered a summarization of the response(s).
##
## c: The connection record for which to fill in DNS reply data.
##
## msg: The DNS message header information for the response.
##
## ans: The general information of a RR response.
##
## reply: The specific response information according to RR type/class.
global do_reply: event(c: connection, msg: dns_msg, ans: dns_answer, reply: string);
}
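
A sketch of an additional do_reply handler that just prints each summarized answer, using the parameters documented above:

event DNS::do_reply(c: connection, msg: dns_msg, ans: dns_answer, reply: string)
	{
	print fmt("%s answered with %s (TTL %s)", ans$query, reply, ans$TTL);
	}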
@ -58,11 +110,11 @@ redef record connection += {
};
# DPD configuration.
redef capture_filters += {
["dns"] = "port 53",
["mdns"] = "udp and port 5353",
["llmns"] = "udp and port 5355",
["netbios-ns"] = "udp port 137",
["netbios-ns"] = "udp port 137",
};
const dns_ports = { 53/udp, 53/tcp, 137/udp, 5353/udp, 5355/udp };
@ -89,7 +141,7 @@ function new_session(c: connection, trans_id: count): Info
state$finished_answers=set();
c$dns_state = state;
}
local info: Info;
info$ts = network_time();
info$id = c$id;
@ -102,23 +154,29 @@ function new_session(c: connection, trans_id: count): Info
function set_session(c: connection, msg: dns_msg, is_query: bool)
{
if ( ! c?$dns_state || msg$id !in c$dns_state$pending )
{
c$dns_state$pending[msg$id] = new_session(c, msg$id);
# Try deleting this transaction id from the set of finished answers.
# Sometimes hosts will reuse ports and transaction ids and this should
# be considered to be a legit scenario (although bad practice).
delete c$dns_state$finished_answers[msg$id];
}
c$dns = c$dns_state$pending[msg$id];
c$dns$rcode = msg$rcode;
c$dns$rcode_name = base_errors[msg$rcode];
if ( ! is_query )
{
if ( ! c$dns?$total_answers )
c$dns$total_answers = msg$num_answers;
if ( c$dns?$total_replies &&
c$dns$total_replies != msg$num_answers + msg$num_addl + msg$num_auth )
{
event conn_weird("dns_changed_number_of_responses", c,
fmt("The declared number of responses changed from %d to %d",
c$dns$total_replies,
msg$num_answers + msg$num_addl + msg$num_auth));
}
@ -129,27 +187,30 @@ function set_session(c: connection, msg: dns_msg, is_query: bool)
}
}
}
event DNS::do_reply(c: connection, msg: dns_msg, ans: dns_answer, reply: string) &priority=5
{
set_session(c, msg, F);
c$dns$AA = msg$AA;
c$dns$RA = msg$RA;
c$dns$TTL = ans$TTL;
if ( ans$answer_type == DNS_ANS )
{
c$dns$AA = msg$AA;
c$dns$RA = msg$RA;
if ( msg$id in c$dns_state$finished_answers )
event conn_weird("dns_reply_seen_after_done", c, "");
if ( reply != "" )
{
if ( ! c$dns?$answers )
c$dns$answers = set();
add c$dns$answers[reply];
c$dns$answers = vector();
c$dns$answers[|c$dns$answers|] = reply;
if ( ! c$dns?$TTLs )
c$dns$TTLs = vector();
c$dns$TTLs[|c$dns$TTLs|] = ans$TTL;
}
if ( c$dns?$answers && |c$dns$answers| == c$dns$total_answers )
{
add c$dns_state$finished_answers[c$dns$trans_id];
@ -158,13 +219,12 @@ event DNS::do_reply(c: connection, msg: dns_msg, ans: dns_answer, reply: string)
}
}
}
event DNS::do_reply(c: connection, msg: dns_msg, ans: dns_answer, reply: string) &priority=-5
{
if ( c$dns$ready )
{
Log::write(DNS::LOG, c$dns);
add c$dns_state$finished_answers[c$dns$trans_id];
# This record is logged and no longer pending.
delete c$dns_state$pending[c$dns$trans_id];
}
@ -173,41 +233,41 @@ event DNS::do_reply(c: connection, msg: dns_msg, ans: dns_answer, reply: string)
event dns_request(c: connection, msg: dns_msg, query: string, qtype: count, qclass: count) &priority=5
{
set_session(c, msg, T);
c$dns$RD = msg$RD;
c$dns$TC = msg$TC;
c$dns$qclass = qclass;
c$dns$qclass_name = classes[qclass];
c$dns$qtype = qtype;
c$dns$qtype_name = query_types[qtype];
# Decode netbios name queries
# Note: I'm ignoring the name type for now. Not sure if this should be
# worked into the query/response in some fashion.
if ( c$id$resp_p == 137/udp )
query = decode_netbios_name(query);
c$dns$query = query;
c$dns$Z = msg$Z;
}
event dns_A_reply(c: connection, msg: dns_msg, ans: dns_answer, a: addr) &priority=5
{
event DNS::do_reply(c, msg, ans, fmt("%s", a));
}
event dns_TXT_reply(c: connection, msg: dns_msg, ans: dns_answer, str: string) &priority=5
{
event DNS::do_reply(c, msg, ans, str);
}
event dns_AAAA_reply(c: connection, msg: dns_msg, ans: dns_answer, a: addr,
astr: string) &priority=5
{
# TODO: What should we do with astr?
event DNS::do_reply(c, msg, ans, fmt("%s", a));
}
event dns_NS_reply(c: connection, msg: dns_msg, ans: dns_answer, name: string) &priority=5
{
event DNS::do_reply(c, msg, ans, name);
@ -223,12 +283,12 @@ event dns_MX_reply(c: connection, msg: dns_msg, ans: dns_answer, name: string,
{
event DNS::do_reply(c, msg, ans, name);
}
event dns_PTR_reply(c: connection, msg: dns_msg, ans: dns_answer, name: string) &priority=5
{
event DNS::do_reply(c, msg, ans, name);
}
event dns_SOA_reply(c: connection, msg: dns_msg, ans: dns_answer, soa: dns_soa) &priority=5
{
event DNS::do_reply(c, msg, ans, soa$mname);
@ -238,7 +298,7 @@ event dns_WKS_reply(c: connection, msg: dns_msg, ans: dns_answer) &priority=5
{
event DNS::do_reply(c, msg, ans, "");
}
event dns_SRV_reply(c: connection, msg: dns_msg, ans: dns_answer) &priority=5
{
event DNS::do_reply(c, msg, ans, "");
@ -247,17 +307,17 @@ event dns_SRV_reply(c: connection, msg: dns_msg, ans: dns_answer) &priority=5
# TODO: figure out how to handle these
#event dns_EDNS(c: connection, msg: dns_msg, ans: dns_answer)
# {
#
# }
#
#event dns_EDNS_addl(c: connection, msg: dns_msg, ans: dns_edns_additional)
# {
#
# }
#
#event dns_TSIG_addl(c: connection, msg: dns_msg, ans: dns_tsig_additional)
# {
#
# }
@ -271,10 +331,10 @@ event connection_state_remove(c: connection) &priority=-5
{
if ( ! c?$dns_state )
return;
# If Bro is expiring state, we should go ahead and log all unlogged
# request/response pairs now.
for ( trans_id in c$dns_state$pending )
Log::write(DNS::LOG, c$dns_state$pending[trans_id]);
}

View file

@ -1,4 +1,4 @@
##! File extraction for FTP.
##! File extraction support for FTP.
@load ./main
@load base/utils/files
@ -6,7 +6,7 @@
module FTP;
export {
## Pattern of file mime types to extract from FTP entity bodies.
## Pattern of file mime types to extract from FTP transfers.
const extract_file_types = /NO_DEFAULT/ &redef;
## The on-disk prefix for files to be extracted from FTP-data transfers.
@ -14,10 +14,15 @@ export {
}
redef record Info += {
## The file handle for the file to be extracted
## On disk file where it was extracted to.
extraction_file: file &log &optional;
## Indicates if the current command/response pair should attempt to
## extract the file if a file was transferred.
extract_file: bool &default=F;
## Internal tracking of the total number of files extracted during this
## session.
num_extracted_files: count &default=0;
};
@ -33,7 +38,6 @@ event file_transferred(c: connection, prefix: string, descr: string,
if ( extract_file_types in s$mime_type )
{
s$extract_file = T;
add s$tags["extracted_file"];
++s$num_extracted_files;
}
}

View file

@ -1,11 +1,7 @@
##! The logging this script does is primarily focused on logging FTP commands
##! along with metadata. For example, if files are transferred, the argument
##! will take on the full path that the client is at along with the requested
##! file name.
##!
##! TODO:
##!
##! * Handle encrypted sessions correctly (get an example?)
##! file name.
@load ./utils-commands
@load base/utils/paths
@ -14,38 +10,64 @@
module FTP;
export {
## The FTP protocol logging stream identifier.
redef enum Log::ID += { LOG };
## List of commands that should have their command/response pairs logged.
const logged_commands = {
"APPE", "DELE", "RETR", "STOR", "STOU", "ACCT"
} &redef;
## This setting determines whether passwords used in FTP sessions are captured.
const default_capture_password = F &redef;
## User IDs that can be considered "anonymous".
const guest_ids = { "anonymous", "ftp", "guest" } &redef;
type Info: record {
## Time when the command was sent.
ts: time &log;
uid: string &log;
id: conn_id &log;
## User name for the current FTP session.
user: string &log &default="<unknown>";
## Password for the current FTP session if captured.
password: string &log &optional;
## Command given by the client.
command: string &log &optional;
## Argument for the command if one is given.
arg: string &log &optional;
## Libmagic "sniffed" file type if the command indicates a file transfer.
mime_type: string &log &optional;
## Libmagic "sniffed" file description if the command indicates a file transfer.
mime_desc: string &log &optional;
## Size of the file if the command indicates a file transfer.
file_size: count &log &optional;
## Reply code from the server in response to the command.
reply_code: count &log &optional;
## Reply message from the server in response to the command.
reply_msg: string &log &optional;
## Arbitrary tags that may indicate a particular attribute of this command.
tags: set[string] &log &default=set();
## By setting the CWD to '/.', we can indicate that unless something
## Current working directory that this session is in. By making
## the default value '/.', we can indicate that unless something
## more concrete is discovered that the existing but unknown
## directory is ok to use.
cwd: string &default="/.";
## Command that is currently waiting for a response.
cmdarg: CmdArg &optional;
## Queue for commands that have been sent but not yet responded to
## are tracked here.
pending_commands: PendingCmds;
## This indicates if the session is in active or passive mode.
## Indicates if the session is in active or passive mode.
passive: bool &default=F;
## This determines if the password will be captured for this request.
## Determines if the password will be captured for this request.
capture_password: bool &default=default_capture_password;
};
@ -56,22 +78,12 @@ export {
y: count;
z: count;
};
# TODO: add this back in some form. raise a notice again?
#const excessive_filename_len = 250 &redef;
#const excessive_filename_trunc_len = 32 &redef;
## These are user IDs that can be considered "anonymous".
const guest_ids = { "anonymous", "ftp", "guest" } &redef;
## The list of commands that should have their command/response pairs logged.
const logged_commands = {
"APPE", "DELE", "RETR", "STOR", "STOU", "ACCT"
} &redef;
## This function splits FTP reply codes into the three constituent
## Parse FTP reply codes into the three constituent single digit values.
global parse_ftp_reply_code: function(code: count): ReplyCode;
## Event that can be handled to access the :bro:type:`FTP::Info`
## record as it is sent on to the logging framework.
global log_ftp: event(rec: Info);
}
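
A sketch of splitting a server reply code into its digits with parse_ftp_reply_code; this assumes the ReplyCode record's x field precedes the y and z fields shown above:

event bro_init()
	{
	local rc = FTP::parse_ftp_reply_code(230);
	# For 230 this yields x=2, y=3, z=0.
	print rc$x, rc$y, rc$z;
	}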

View file

@ -2,14 +2,22 @@ module FTP;
export {
type CmdArg: record {
## Time when the command was sent.
ts: time;
## Command.
cmd: string &default="<unknown>";
## Argument for the command if one was given.
arg: string &default="";
## Counter to track how many commands have been executed.
seq: count &default=0;
};
## Structure for tracking pending commands in the event that the client
## sends a large number of commands before the server has a chance to
## reply.
type PendingCmds: table[count] of CmdArg;
## Possible response codes for a wide variety of FTP commands.
const cmd_reply_code: set[string, count] = {
# According to RFC 959
["<init>", [120, 220, 421]],

View file

@ -8,29 +8,24 @@
module HTTP;
export {
## Pattern of file mime types to extract from HTTP entity bodies.
## Pattern of file mime types to extract from HTTP response entity bodies.
const extract_file_types = /NO_DEFAULT/ &redef;
## The on-disk prefix for files to be extracted from HTTP entity bodies.
const extraction_prefix = "http-item" &redef;
redef record Info += {
## This field can be set per-connection to determine if the entity body
## will be extracted. It must be set to T on or before the first
## entity_body_data event.
extracting_file: bool &default=F;
## This is the holder for the file handle as the file is being written
## to disk.
## On-disk file where the response body was extracted to.
extraction_file: file &log &optional;
};
redef record State += {
entity_bodies: count &default=0;
## Indicates if the response body is to be extracted or not. Must be
## set before or by the first :bro:id:`http_entity_data` event for the
## content.
extract_file: bool &default=F;
};
}
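
A minimal sketch that turns on extraction for executables, assuming this script is loaded (the pattern and prefix are illustrative):

redef HTTP::extract_file_types = /application\/x-dosexec/;
redef HTTP::extraction_prefix = "http-extracted";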
event http_entity_data(c: connection, is_orig: bool, length: count, data: string) &priority=5
event http_entity_data(c: connection, is_orig: bool, length: count, data: string) &priority=-5
{
# Client body extraction is not currently supported in this script.
if ( is_orig )
@ -41,8 +36,12 @@ event http_entity_data(c: connection, is_orig: bool, length: count, data: string
if ( c$http?$mime_type &&
extract_file_types in c$http$mime_type )
{
c$http$extracting_file = T;
local suffix = fmt("%s_%d.dat", is_orig ? "orig" : "resp", ++c$http_state$entity_bodies);
c$http$extract_file = T;
}
if ( c$http$extract_file )
{
local suffix = fmt("%s_%d.dat", is_orig ? "orig" : "resp", c$http_state$current_response);
local fname = generate_extraction_filename(extraction_prefix, c, suffix);
c$http$extraction_file = open(fname);
@ -50,12 +49,12 @@ event http_entity_data(c: connection, is_orig: bool, length: count, data: string
}
}
if ( c$http$extracting_file )
if ( c$http?$extraction_file )
print c$http$extraction_file, data;
}
event http_end_entity(c: connection, is_orig: bool)
{
if ( c$http$extracting_file )
if ( c$http?$extraction_file )
close(c$http$extraction_file);
}

View file

@ -11,7 +11,8 @@ export {
};
redef record Info += {
## The MD5 sum for a file transferred over HTTP will be stored here.
## MD5 sum for a file transferred over HTTP calculated from the
## response body.
md5: string &log &optional;
## This value can be set per-transfer to determine per request
@ -19,8 +20,8 @@ export {
## set to T at the time of or before the first chunk of body data.
calc_md5: bool &default=F;
## This boolean value indicates if an MD5 sum is currently being
## calculated for the current file transfer.
## Indicates if an MD5 sum is being calculated for the current
## request/response pair.
calculating_md5: bool &default=F;
};

View file

@ -1,5 +1,4 @@
##! This script is involved in the identification of file types in HTTP
##! response bodies.
##! Identification of file types in HTTP response bodies with file content sniffing.
@load base/frameworks/signatures
@load base/frameworks/notice
@ -15,30 +14,32 @@ module HTTP;
export {
redef enum Notice::Type += {
# This notice is thrown when the file extension doesn't
# seem to match the file contents.
## Indicates when the file extension doesn't seem to match the file contents.
Incorrect_File_Type,
};
redef record Info += {
## This will record the mime_type identified.
## Mime type of response body identified by content sniffing.
mime_type: string &log &optional;
## This indicates that no data of the current file transfer has been
## Indicates that no data of the current file transfer has been
## seen yet. After the first :bro:id:`http_entity_data` event, it
## will be set to T.
## will be set to F.
first_chunk: bool &default=T;
};
redef enum Tags += {
IDENTIFIED_FILE
};
# Create regexes that *should* in be in the urls for specifics mime types.
# Notices are thrown if the pattern doesn't match the url for the file type.
## Mapping between mime types and regular expressions for URLs.
## The :bro:enum:`HTTP::Incorrect_File_Type` notice is generated if the
## pattern doesn't match the mime type that was discovered.
const mime_types_extensions: table[string] of pattern = {
["application/x-dosexec"] = /\.([eE][xX][eE]|[dD][lL][lL])/,
} &redef;
## A pattern for filtering out :bro:enum:`HTTP::Incorrect_File_Type` urls
## that are not noteworthy before a notice is created. Each
## pattern added should match the complete URL (the matched URLs include
## "http://" at the beginning).
const ignored_incorrect_file_type_urls = /^$/ &redef;
}
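
A sketch of extending the mapping and suppressing the notice for known-benign URLs (both values are illustrative):

redef HTTP::mime_types_extensions += {
	["image/jpeg"] = /\.([jJ][pP][eE]?[gG])/,
};
redef HTTP::ignored_incorrect_file_type_urls = /^http:\/\/downloads\.example\.com\/.*/;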
event signature_match(state: signature_state, msg: string, data: string) &priority=5
@ -59,6 +60,10 @@ event signature_match(state: signature_state, msg: string, data: string) &priori
c$http?$uri && mime_types_extensions[msg] !in c$http$uri )
{
local url = build_url_http(c$http);
if ( url == ignored_incorrect_file_type_urls )
return;
local message = fmt("%s %s %s", msg, c$http$method, url);
NOTICE([$note=Incorrect_File_Type,
$msg=message,

View file

@ -1,3 +1,7 @@
##! Implements base functionality for HTTP analysis. The logging model is
##! to log request/response pairs and all relevant metadata together in
##! a single record.
@load base/utils/numbers
@load base/utils/files
@ -8,6 +12,7 @@ export {
## Indicate a type of attack or compromise in the record to be logged.
type Tags: enum {
## Placeholder.
EMPTY
};
@ -15,64 +20,69 @@ export {
const default_capture_password = F &redef;
type Info: record {
ts: time &log;
uid: string &log;
id: conn_id &log;
## This represents the pipelined depth into the connection of this
## Timestamp for when the request happened.
ts: time &log;
uid: string &log;
id: conn_id &log;
## Represents the pipelined depth into the connection of this
## request/response transaction.
trans_depth: count &log;
## The verb used in the HTTP request (GET, POST, HEAD, etc.).
method: string &log &optional;
## The value of the HOST header.
host: string &log &optional;
## The URI used in the request.
uri: string &log &optional;
## The value of the "referer" header. The comment is deliberately
trans_depth: count &log;
## Verb used in the HTTP request (GET, POST, HEAD, etc.).
method: string &log &optional;
## Value of the HOST header.
host: string &log &optional;
## URI used in the request.
uri: string &log &optional;
## Value of the "referer" header. The comment is deliberately
## misspelled like the standard declares, but the name used here is
## "referrer" spelled correctly.
referrer: string &log &optional;
## The value of the User-Agent header from the client.
user_agent: string &log &optional;
## The actual uncompressed content size of the data transferred from
referrer: string &log &optional;
## Value of the User-Agent header from the client.
user_agent: string &log &optional;
## Actual uncompressed content size of the data transferred from
## the client.
request_body_len: count &log &default=0;
## The actual uncompressed content size of the data transferred from
request_body_len: count &log &default=0;
## Actual uncompressed content size of the data transferred from
## the server.
response_body_len: count &log &default=0;
## The status code returned by the server.
## Status code returned by the server.
status_code: count &log &optional;
## The status message returned by the server.
## Status message returned by the server.
status_msg: string &log &optional;
## The last 1xx informational reply code returned by the server.
## Last seen 1xx informational reply code returned by the server.
info_code: count &log &optional;
## The last 1xx informational reply message returned by the server.
## Last seen 1xx informational reply message returned by the server.
info_msg: string &log &optional;
## The filename given in the Content-Disposition header
## sent by the server.
## Filename given in the Content-Disposition header sent by the server.
filename: string &log &optional;
## This is a set of indicators of various attributes discovered and
## A set of indicators of various attributes discovered and
## related to a particular request/response pair.
tags: set[Tags] &log;
## The username if basic-auth is performed for the request.
## Username if basic-auth is performed for the request.
username: string &log &optional;
## The password if basic-auth is performed for the request.
## Password if basic-auth is performed for the request.
password: string &log &optional;
## This determines if the password will be captured for this request.
## Determines if the password will be captured for this request.
capture_password: bool &default=default_capture_password;
## All of the headers that may indicate if the request was proxied.
proxied: set[string] &log &optional;
};
## Structure to maintain state for an HTTP connection with multiple
## requests and responses.
type State: record {
## Pending requests.
pending: table[count] of Info;
current_response: count &default=0;
## Current request in the pending queue.
current_request: count &default=0;
## Current response in the pending queue.
current_response: count &default=0;
};
## The list of HTTP headers typically used to indicate a proxied request.
## A list of HTTP headers typically used to indicate proxied requests.
const proxy_headers: set[string] = {
"FORWARDED",
"X-FORWARDED-FOR",
@ -83,6 +93,8 @@ export {
"PROXY-CONNECTION",
} &redef;
## Event that can be handled to access the HTTP record as it is sent on
## to the logging framework.
global log_http: event(rec: Info);
}
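
A sketch of a log_http handler, e.g. printing server errors as request/response pairs are logged:

event HTTP::log_http(rec: HTTP::Info)
	{
	if ( rec?$status_code && rec$status_code >= 500 )
		print fmt("server error %d for connection %s", rec$status_code, rec$uid);
	}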

View file

@ -5,8 +5,31 @@
module HTTP;
export {
## Given a string containing a series of key-value pairs separated by "=",
## this function can be used to parse out all of the key names.
##
## data: The raw data, such as a URL or cookie value.
##
## kv_splitter: A regular expression representing the separator between
## key-value pairs.
##
## Returns: A vector of strings containing the keys.
global extract_keys: function(data: string, kv_splitter: pattern): string_vec;
## Creates a URL from an :bro:type:`HTTP::Info` record. This should handle
## edge cases such as proxied requests appropriately.
##
## rec: An :bro:type:`HTTP::Info` record.
##
## Returns: A URL, not prefixed by "http://".
global build_url: function(rec: Info): string;
## Creates a URL from an :bro:type:`HTTP::Info` record. This should handle
## edge cases such as proxied requests appropriately.
##
## rec: An :bro:type:`HTTP::Info` record.
##
## Returns: A URL prefixed with "http://".
global build_url_http: function(rec: Info): string;
}
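
A quick sketch of extract_keys on a query-string style value (the data and separator are illustrative):

event bro_init()
	{
	# Prints the vector of key names: [a, b, c].
	print HTTP::extract_keys("a=1&b=2&c=3", /&/);
	}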

View file

@ -5,8 +5,9 @@
##! but that connection will actually be between B and C which could be
##! analyzed on a different worker.
##!
##! Example line from IRC server indicating that the DCC SEND is about to start:
##! PRIVMSG my_nick :^ADCC SEND whateverfile.zip 3640061780 1026 41709^A
# Example line from IRC server indicating that the DCC SEND is about to start:
# PRIVMSG my_nick :^ADCC SEND whateverfile.zip 3640061780 1026 41709^A
@load ./main
@load base/utils/files
@ -14,24 +15,25 @@
module IRC;
export {
redef enum Tag += { EXTRACTED_FILE };
## Pattern of file mime types to extract from IRC DCC file transfers.
const extract_file_types = /NO_DEFAULT/ &redef;
## The on-disk prefix for files to be extracted from IRC DCC file transfers.
## On-disk prefix for files to be extracted from IRC DCC file transfers.
const extraction_prefix = "irc-dcc-item" &redef;
redef record Info += {
dcc_file_name: string &log &optional;
dcc_file_size: count &log &optional;
dcc_mime_type: string &log &optional;
## DCC filename requested.
dcc_file_name: string &log &optional;
## Size of the DCC transfer as indicated by the sender.
dcc_file_size: count &log &optional;
## Sniffed mime type of the file.
dcc_mime_type: string &log &optional;
## The file handle for the file to be extracted
extraction_file: file &log &optional;
## A boolean to indicate if the current file transfer should be extraced.
## A boolean to indicate if the current file transfer should be extracted.
extract_file: bool &default=F;
## The count of the number of files that have been extracted during the session.
num_extracted_files: count &default=0;
@ -54,8 +56,10 @@ event file_transferred(c: connection, prefix: string, descr: string,
if ( extract_file_types == irc$dcc_mime_type )
{
irc$extract_file = T;
add irc$tags[EXTRACTED_FILE];
}
if ( irc$extract_file )
{
local suffix = fmt("%d.dat", ++irc$num_extracted_files);
local fname = generate_extraction_filename(extraction_prefix, c, suffix);
irc$extraction_file = open(fname);
@ -76,7 +80,7 @@ event file_transferred(c: connection, prefix: string, descr: string,
Log::write(IRC::LOG, irc);
irc$command = tmp;
if ( irc$extract_file && irc?$extraction_file )
if ( irc?$extraction_file )
set_contents_file(id, CONTENTS_RESP, irc$extraction_file);
# Delete these values in case another DCC transfer

View file

@ -1,36 +1,38 @@
##! This is the script that implements the core IRC analysis support. It only
##! logs a very limited subset of the IRC protocol by default. The points
##! that it logs at are NICK commands, USER commands, and JOIN commands. It
##! logs various bits of meta data as indicated in the :bro:type:`Info` record
##! along with the command and the command arguments.
##! Implements the core IRC analysis support. The logging model is to log
##! IRC commands along with the associated response and some additional
##! metadata about the connection if it's available.
module IRC;
export {
redef enum Log::ID += { LOG };
type Tag: enum {
EMPTY
};
type Info: record {
## Timestamp when the command was seen.
ts: time &log;
uid: string &log;
id: conn_id &log;
## Nick name given for the connection.
nick: string &log &optional;
## User name given for the connection.
user: string &log &optional;
channels: set[string] &log &optional;
## Command given by the client.
command: string &log &optional;
## Value for the command given by the client.
value: string &log &optional;
## Any additional data for the command.
addl: string &log &optional;
tags: set[Tag] &log;
};
## Event that can be handled to access the IRC record as it is sent on
## to the logging framework.
global irc_log: event(rec: Info);
}
redef record connection += {
## IRC session information.
irc: Info &optional;
};

View file

@ -14,15 +14,17 @@
module SSH;
export {
## The SSH protocol logging stream identifier.
redef enum Log::ID += { LOG };
redef enum Notice::Type += {
## This indicates that a heuristically detected "successful" SSH
## Indicates that a heuristically detected "successful" SSH
## authentication occurred.
Login
};
type Info: record {
## Time when the SSH connection began.
ts: time &log;
uid: string &log;
id: conn_id &log;
@ -34,11 +36,11 @@ export {
## would be set for the opposite situation.
# TODO: handle local-local and remote-remote better.
direction: Direction &log &optional;
## The software string given by the client.
## Software string given by the client.
client: string &log &optional;
## The software string given by the server.
## Software string given by the server.
server: string &log &optional;
## The amount of data returned from the server. This is currently
## Amount of data returned from the server. This is currently
## the only measure of the success heuristic and it is logged to
## assist analysts looking at the logs to make their own determination
## about the success on a case-by-case basis.
@ -48,8 +50,8 @@ export {
done: bool &default=F;
};
## The size in bytes at which the SSH connection is presumed to be
## successful.
## The size in bytes of data sent by the server at which the SSH
## connection is presumed to be successful.
const authentication_data_size = 5500 &redef;
## If true, we tell the event engine to not look at further data
@ -58,14 +60,16 @@ export {
## kinds of analyses (e.g., tracking connection size).
const skip_processing_after_detection = F &redef;
## This event is generated when the heuristic thinks that a login
## Event that is generated when the heuristic thinks that a login
## was successful.
global heuristic_successful_login: event(c: connection);
## This event is generated when the heuristic thinks that a login
## Event that is generated when the heuristic thinks that a login
## failed.
global heuristic_failed_login: event(c: connection);
## Event that can be handled to access the :bro:type:`SSH::Info`
## record as it is sent on to the logging framework.
global log_ssh: event(rec: Info);
}
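
A sketch of tuning the success heuristic and reacting to its outcome (the threshold value is illustrative):

redef SSH::authentication_data_size = 4000;

event SSH::heuristic_successful_login(c: connection)
	{
	print fmt("heuristically successful SSH login: %s", c$uid);
	}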

View file

@ -1,18 +1,65 @@
module SSL;
export {
const SSLv2 = 0x0002;
const SSLv3 = 0x0300;
const TLSv10 = 0x0301;
const TLSv11 = 0x0302;
const TLSv12 = 0x0303;
## Mapping between the constants and string values for SSL/TLS versions.
const version_strings: table[count] of string = {
[SSLv2] = "SSLv2",
[SSLv3] = "SSLv3",
[TLSv10] = "TLSv10",
[TLSv11] = "TLSv11",
[TLSv12] = "TLSv12",
} &default="UNKNOWN";
## Mapping between numeric codes and human readable strings for alert
## levels.
const alert_levels: table[count] of string = {
[1] = "warning",
[2] = "fatal",
} &default=function(i: count):string { return fmt("unknown-%d", i); };
## Mapping between numeric codes and human readable strings for alert
## descriptions.
const alert_descriptions: table[count] of string = {
[0] = "close_notify",
[10] = "unexpected_message",
[20] = "bad_record_mac",
[21] = "decryption_failed",
[22] = "record_overflow",
[30] = "decompression_failure",
[40] = "handshake_failure",
[41] = "no_certificate",
[42] = "bad_certificate",
[43] = "unsupported_certificate",
[44] = "certificate_revoked",
[45] = "certificate_expired",
[46] = "certificate_unknown",
[47] = "illegal_parameter",
[48] = "unknown_ca",
[49] = "access_denied",
[50] = "decode_error",
[51] = "decrypt_error",
[60] = "export_restriction",
[70] = "protocol_version",
[71] = "insufficient_security",
[80] = "internal_error",
[90] = "user_canceled",
[100] = "no_renegotiation",
[110] = "unsupported_extension",
[111] = "certificate_unobtainable",
[112] = "unrecognized_name",
[113] = "bad_certificate_status_response",
[114] = "bad_certificate_hash_value",
[115] = "unknown_psk_identity",
} &default=function(i: count):string { return fmt("unknown-%d", i); };
## Mapping between numeric codes and human readable strings for SSL/TLS
## extensions.
# More information can be found here:
# http://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xml
const extensions: table[count] of string = {
[0] = "server_name",
@ -31,10 +78,11 @@ export {
[13] = "signature_algorithms",
[14] = "use_srtp",
[35] = "SessionTicket TLS",
[13172] = "next_protocol_negotiation",
[65281] = "renegotiation_info"
} &default=function(i: count):string { return fmt("unknown-%d", i); };
## SSLv2
# SSLv2
const SSLv20_CK_RC4_128_WITH_MD5 = 0x010080;
const SSLv20_CK_RC4_128_EXPORT40_WITH_MD5 = 0x020080;
const SSLv20_CK_RC2_128_CBC_WITH_MD5 = 0x030080;
@ -43,7 +91,7 @@ export {
const SSLv20_CK_DES_64_CBC_WITH_MD5 = 0x060040;
const SSLv20_CK_DES_192_EDE3_CBC_WITH_MD5 = 0x0700C0;
## TLS
# TLS
const TLS_NULL_WITH_NULL_NULL = 0x0000;
const TLS_RSA_WITH_NULL_MD5 = 0x0001;
const TLS_RSA_WITH_NULL_SHA = 0x0002;
@ -260,13 +308,11 @@ export {
const SSL_RSA_WITH_DES_CBC_MD5 = 0xFF82;
const SSL_RSA_WITH_3DES_EDE_CBC_MD5 = 0xFF83;
const TLS_EMPTY_RENEGOTIATION_INFO_SCSV = 0x00FF;
# --- This is a table of all known cipher specs.
# --- It can be used for detecting unknown ciphers and for
# --- converting the cipher spec constants into a human readable format.
## This is a table of all known cipher specs. It can be used for
## detecting unknown ciphers and for converting the cipher spec constants
## into a human readable format.
const cipher_desc: table[count] of string = {
# --- sslv20 ---
[SSLv20_CK_RC4_128_EXPORT40_WITH_MD5] =
"SSLv20_CK_RC4_128_EXPORT40_WITH_MD5",
[SSLv20_CK_RC4_128_WITH_MD5] = "SSLv20_CK_RC4_128_WITH_MD5",
@ -278,7 +324,6 @@ export {
"SSLv20_CK_DES_192_EDE3_CBC_WITH_MD5",
[SSLv20_CK_DES_64_CBC_WITH_MD5] = "SSLv20_CK_DES_64_CBC_WITH_MD5",
# --- TLS ---
[TLS_NULL_WITH_NULL_NULL] = "TLS_NULL_WITH_NULL_NULL",
[TLS_RSA_WITH_NULL_MD5] = "TLS_RSA_WITH_NULL_MD5",
[TLS_RSA_WITH_NULL_SHA] = "TLS_RSA_WITH_NULL_SHA",
@ -491,7 +536,8 @@ export {
[SSL_RSA_FIPS_WITH_DES_CBC_SHA_2] = "SSL_RSA_FIPS_WITH_DES_CBC_SHA_2",
[SSL_RSA_FIPS_WITH_3DES_EDE_CBC_SHA_2] = "SSL_RSA_FIPS_WITH_3DES_EDE_CBC_SHA_2",
} &default="UNKNOWN";
## Mapping between numeric codes and human readable strings for X.509
## certificate verification errors.
const x509_errors: table[count] of string = {
[0] = "ok",
[1] = "unable to get issuer cert",
@ -526,8 +572,7 @@ export {
[30] = "akid issuer serial mismatch",
[31] = "keyusage no certsign",
[32] = "unable to get crl issuer",
[33] = "unhandled critical extension"
[33] = "unhandled critical extension",
};
}
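
A quick sketch of these lookup tables in action:

event bro_init()
	{
	# Prints "TLSv10", then "handshake_failure".
	print SSL::version_strings[0x0301];
	print SSL::alert_descriptions[40];
	}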

View file

@ -1,3 +1,6 @@
##! Base SSL analysis script. This script logs information about the SSL/TLS
##! handshaking and encryption establishment process.
@load ./consts
module SSL;
@ -6,31 +9,45 @@ export {
redef enum Log::ID += { LOG };
type Info: record {
## Time when the SSL connection began.
ts: time &log;
uid: string &log;
id: conn_id &log;
## SSL/TLS version the server offered.
version: string &log &optional;
## SSL/TLS cipher suite the server chose.
cipher: string &log &optional;
## Value of the Server Name Indicator SSL/TLS extension. It
## indicates the server name that the client was requesting.
server_name: string &log &optional;
## Session ID offered by the client for session resumption.
session_id: string &log &optional;
## Subject of the X.509 certificate offered by the server.
subject: string &log &optional;
## NotValidBefore field value from the server certificate.
not_valid_before: time &log &optional;
## NotValidAfter field value from the server certificate.
not_valid_after: time &log &optional;
## Last alert that was seen during the connection.
last_alert: string &log &optional;
## Full binary server certificate stored in DER format.
cert: string &optional;
## Chain of certificates offered by the server to validate its
## complete signing chain.
cert_chain: vector of string &optional;
## This stores the analyzer id used for the analyzer instance attached
## to each connection. It is not used for logging since it's a
## The analyzer ID used for the analyzer instance attached
## to each connection. It is not used for logging since it's a
## meaningless arbitrary number.
analyzer_id: count &optional;
};
## This is where the default root CA bundle is defined. By loading the
## The default root CA bundle. By loading the
## mozilla-ca-list.bro script it will be set to Mozilla's root CA list.
const root_certs: table[string] of string = {} &redef;
## If true, detach the SSL analyzer from the connection to prevent
## If true, detach the SSL analyzer from the connection to prevent
## continuing to process encrypted traffic. Helps with performance
## (especially with large file transfers).
const disable_analyzer_after_detection = T &redef;
@ -40,12 +57,9 @@ export {
## utility.
const openssl_util = "openssl" &redef;
## Event that can be handled to access the SSL
## record as it is sent on to the logging framework.
global log_ssl: event(rec: Info);
const ports = {
443/tcp, 563/tcp, 585/tcp, 614/tcp, 636/tcp,
989/tcp, 990/tcp, 992/tcp, 993/tcp, 995/tcp, 5223/tcp
} &redef;
}
redef record connection += {
@ -72,6 +86,11 @@ redef capture_filters += {
["xmpps"] = "tcp port 5223",
};
const ports = {
443/tcp, 563/tcp, 585/tcp, 614/tcp, 636/tcp,
989/tcp, 990/tcp, 992/tcp, 993/tcp, 995/tcp, 5223/tcp
};
redef dpd_config += {
[[ANALYZER_SSL]] = [$ports = ports]
};
@ -86,7 +105,7 @@ function set_session(c: connection)
if ( ! c?$ssl )
c$ssl = [$ts=network_time(), $uid=c$uid, $id=c$id, $cert_chain=vector()];
}
function finish(c: connection)
{
Log::write(SSL::LOG, c$ssl);
@ -98,29 +117,33 @@ function finish(c: connection)
event ssl_client_hello(c: connection, version: count, possible_ts: time, session_id: string, ciphers: count_set) &priority=5
{
set_session(c);
# Save the session_id if there is one set.
if ( session_id != /^\x00{32}$/ )
c$ssl$session_id = bytestring_to_hexstr(session_id);
}
event ssl_server_hello(c: connection, version: count, possible_ts: time, session_id: string, cipher: count, comp_method: count) &priority=5
{
set_session(c);
c$ssl$version = version_strings[version];
c$ssl$cipher = cipher_desc[cipher];
}
event x509_certificate(c: connection, cert: X509, is_server: bool, chain_idx: count, chain_len: count, der_cert: string) &priority=5
event x509_certificate(c: connection, is_orig: bool, cert: X509, chain_idx: count, chain_len: count, der_cert: string) &priority=5
{
set_session(c);
# We aren't doing anything with client certificates yet.
if ( is_orig )
return;
if ( chain_idx == 0 )
{
# Save the primary cert.
c$ssl$cert = der_cert;
# Also save other certificate information about the primary cert.
c$ssl$subject = cert$subject;
c$ssl$not_valid_before = cert$not_valid_before;
@ -132,20 +155,27 @@ event x509_certificate(c: connection, cert: X509, is_server: bool, chain_idx: co
c$ssl$cert_chain[|c$ssl$cert_chain|] = der_cert;
}
}
event ssl_extension(c: connection, code: count, val: string) &priority=5
event ssl_extension(c: connection, is_orig: bool, code: count, val: string) &priority=5
{
set_session(c);
if ( extensions[code] == "server_name" )
if ( is_orig && extensions[code] == "server_name" )
c$ssl$server_name = sub_bytes(val, 6, |val|);
}
event ssl_alert(c: connection, is_orig: bool, level: count, desc: count) &priority=5
{
set_session(c);
c$ssl$last_alert = alert_descriptions[desc];
}
event ssl_established(c: connection) &priority=5
{
set_session(c);
}
event ssl_established(c: connection) &priority=-5
{
finish(c);
@ -163,4 +193,4 @@ event protocol_violation(c: connection, atype: count, aid: count,
{
if ( c?$ssl )
finish(c);
}

View file

@ -1,6 +1,9 @@
##! Constants definitions for syslog.
module Syslog;
export {
## Mapping between the constants and string values for syslog facilities.
const facility_codes: table[count] of string = {
[0] = "KERN",
[1] = "USER",
@ -27,7 +30,8 @@ export {
[22] = "LOCAL6",
[23] = "LOCAL7",
} &default=function(c: count): string { return fmt("?-%d", c); };
## Mapping between the constants and string values for syslog severities.
const severity_codes: table[count] of string = {
[0] = "EMERG",
[1] = "ALERT",

View file

@ -1,4 +1,5 @@
##! Core script support for logging syslog messages.
##! Core script support for logging syslog messages. This script represents
##! one syslog message as one logged record.
@load ./consts
@ -8,19 +9,23 @@ export {
redef enum Log::ID += { LOG };
type Info: record {
## Timestamp of when the syslog message was seen.
ts: time &log;
uid: string &log;
id: conn_id &log;
## Protocol over which the message was seen.
proto: transport_proto &log;
## Syslog facility for the message.
facility: string &log;
## Syslog severity for the message.
severity: string &log;
## The plain text message.
message: string &log;
};
const ports = { 514/udp } &redef;
}
redef capture_filters += { ["syslog"] = "port 514" };
const ports = { 514/udp } &redef;
redef dpd_config += { [ANALYZER_SYSLOG_BINPAC] = [$ports = ports] };
redef likely_server_ports += { 514/udp };

View file

@ -18,7 +18,7 @@ export {
const local_nets: set[subnet] &redef;
## This is used for retrieving the subnet when you have multiple
## :bro:id:`local_nets`. A membership query can be done with an
## :bro:id:`Site::local_nets`. A membership query can be done with an
## :bro:type:`addr` and the table will yield the subnet it was found
## within.
global local_nets_table: table[subnet] of subnet = {};

View file

@ -1,3 +1,12 @@
##! The controllee portion of the control framework. Load this script if remote
##! runtime control of the Bro process is desired.
##!
##! A controllee only needs to load the controllee script in addition
##! to the specific analysis scripts desired. It may also need a node
##! configured as a controller node in the communications nodes configuration::
##!
##! bro <scripts> frameworks/control/controllee
@load base/frameworks/control
# If an instance is a controllee, it implicitly needs to listen for remote
# connections.

View file

@ -1,3 +1,10 @@
##! This is a utility script that implements the controller interface for the
##! control framework. It's intended to be run to control a remote Bro
##! and then shutdown.
##!
##! It's intended to be used from the command line like this::
##!
##!     bro <scripts> frameworks/control/controller Control::host=<host_addr> Control::port=<host_port> Control::cmd=<command> [Control::arg=<arg>]
@load base/frameworks/control
@load base/frameworks/communication

View file

@ -8,7 +8,6 @@ module ProtocolDetector;
export {
redef enum Notice::Type += {
Off_Port_Protocol_Found, # raised for each connection found
Protocol_Found,
Server_Found,
};
@ -155,13 +154,10 @@ function report_protocols(c: connection)
{
if ( [a, c$id$resp_h, c$id$resp_p] in valids )
do_notice(c, a, valids[a, c$id$resp_h, c$id$resp_p]);
else if ( [a, 0.0.0.0, c$id$resp_p] in valids )
do_notice(c, a, valids[a, 0.0.0.0, c$id$resp_p]);
else
do_notice(c, a, NONE);
append_addl(c, analyzer_name(a));
}
delete conns[c$id];
@ -218,20 +214,6 @@ event protocol_confirmation(c: connection, atype: count, aid: count)
}
}
# event connection_analyzer_disabled(c: connection, analyzer: count)
# {
# if ( c$id !in conns )
# return;
#
# delete conns[c$id][analyzer];
# }
function append_proto_addl(c: connection)
{
for ( a in conns[c$id] )
append_addl(c, fmt_protocol(get_protocol(c, a)));
}
function found_protocol(c: connection, analyzer: count, protocol: string)
{
# Don't report anything running on a well-known port.

View file

@ -1,3 +1,6 @@
##! An example of using the metrics framework to collect connection metrics
##! aggregated into /24 CIDR ranges.
@load base/frameworks/metrics
@load base/utils/site

View file

@ -1,9 +1,17 @@
##! Provides an example of aggregating and limiting collection down to
##! only local networks. Additionally, the status code of the response to
##! the request is added into the metric.
@load base/frameworks/metrics
@load base/protocols/http
@load base/utils/site
redef enum Metrics::ID += {
## Measures HTTP requests indexed on both the request host and the response
## code from the server.
HTTP_REQUESTS_BY_STATUS_CODE,
## Currently unfinished and not working.
HTTP_REQUESTS_BY_HOST_HEADER,
};
@ -11,13 +19,13 @@ event bro_init()
{
# TODO: these are waiting on a fix with table vals + records before they will work.
#Metrics::add_filter(HTTP_REQUESTS_BY_HOST_HEADER,
# [$pred(index: Index) = { return Site:is_local_addr(index$host) },
# [$pred(index: Metrics::Index) = { return Site::is_local_addr(index$host); },
# $aggregation_mask=24,
# $break_interval=5mins]);
#
## Site::local_nets must be defined in order for this to actually do anything.
#Metrics::add_filter(HTTP_REQUESTS_BY_STATUS_CODE, [$aggregation_table=Site::local_nets_table,
# $break_interval=5mins]);
# $break_interval=1min]);
# Site::local_nets must be defined in order for this to actually do anything.
Metrics::add_filter(HTTP_REQUESTS_BY_STATUS_CODE, [$aggregation_table=Site::local_nets_table,
$break_interval=1min]);
}
event HTTP::log_http(rec: HTTP::Info)

View file

@ -1,3 +1,8 @@
##! Provides an example of using the metrics framework to collect the number
##! of times a specific server name indicator value is seen in SSL session
##! establishments. Names ending in google.com are being filtered out as an
##! example of the predicate based filtering in metrics filters.
@load base/frameworks/metrics
@load base/protocols/ssl

View file

@ -1,3 +1,7 @@
##! Provides the possibility to define software names that are interesting to
##! watch for changes. A notice is generated if software versions change on a
##! host.
@load base/frameworks/notice
@load base/frameworks/software
@ -5,24 +9,17 @@ module Software;
export {
redef enum Notice::Type += {
## For certain softwares, a version changing may matter. In that case,
## For certain software, a version changing may matter. In that case,
## this notice will be generated. Software that matters if the version
## changes can be configured with the
## :bro:id:`Software::interesting_version_changes` variable.
Software_Version_Change,
};
## Some software is more interesting when the version changes and this
## Some software is more interesting when the version changes and this is
## a set of all software that should raise a notice when a different
## version is seen on a host.
const interesting_version_changes: set[string] = {
"SSH"
} &redef;
## Some software is more interesting when the version changes and this
## a set of all software that should raise a notice when a different
## version is seen on a host.
const interesting_type_changes: set[string] = {};
const interesting_version_changes: set[string] = { } &redef;
}
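
A sketch of registering software for version-change notices; the name must match what the software framework records (here reusing the "SSH" entry from the previous default):

redef Software::interesting_version_changes += { "SSH" };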
event log_software(rec: Info)

View file

@ -1,3 +1,7 @@
##! Provides a variable to define vulnerable versions of software, and if a
##! version of that software is as old or older than the defined version, a
##! notice will be generated.
@load base/frameworks/notice
@load base/frameworks/software
@ -5,6 +9,7 @@ module Software;
export {
redef enum Notice::Type += {
## Indicates that a vulnerable version of software was detected.
Vulnerable_Version,
};
@ -18,6 +23,7 @@ event log_software(rec: Info)
if ( rec$name in vulnerable_versions &&
cmp_versions(rec$version, vulnerable_versions[rec$name]) <= 0 )
{
NOTICE([$note=Vulnerable_Version, $src=rec$host, $msg=software_fmt(rec)]);
NOTICE([$note=Vulnerable_Version, $src=rec$host,
$msg=fmt("A vulnerable version of software was detected: %s", software_fmt(rec))]);
}
}

View file

@ -15,7 +15,7 @@ export {
alert: AlertData &log;
};
## This can convert a Barnyard :bro:type:`PacketID` value to a
## This can convert a Barnyard :bro:type:`Barnyard2::PacketID` value to a
## :bro:type:`conn_id` value in the case that you might need to index
## into an existing data structure elsewhere within Bro.
global pid2cid: function(p: PacketID): conn_id;

View file

@ -17,7 +17,7 @@ export {
redef enum Notice::Type += {
## Report if the detected capture loss exceeds the percentage
## threshold
## threshold.
Too_Much_Loss
};
@ -42,9 +42,9 @@ export {
const watch_interval = 15mins &redef;
## The percentage of missed data that is considered "too much"
## when the :bro:enum:`Too_Much_Loss` notice should be generated.
## The value is expressed as a double between 0 and 1 with 1 being
## 100%
## when the :bro:enum:`CaptureLoss::Too_Much_Loss` notice should be
## generated. The value is expressed as a double between 0 and 1 with 1
## being 100%.
const too_much_loss: double = 0.1 &redef;
}

View file

@ -1,4 +1,4 @@
##!
##! Log the loaded scripts.
module LoadedScripts;

View file

@ -2,14 +2,13 @@
module Profiling;
## Set the profiling output file.
redef profiling_file = open_log_file("prof");
export {
## Cheap profiling every 15 seconds.
redef profiling_interval = 15 secs &redef;
}
## Set the cheap profiling interval.
redef profiling_interval = 15 secs;
# Expensive profiling every 5 minutes.
## Set the expensive profiling interval.
redef expensive_profiling_multiple = 20;
event bro_init()

View file

@ -0,0 +1,83 @@
##! Log memory/packet/lag statistics. Differs from profiling.bro in that this
##! is lighter-weight (much less info, and less load to generate).
@load base/frameworks/notice
module Stats;
export {
redef enum Log::ID += { LOG };
## How often stats are reported.
const stats_report_interval = 1min &redef;
type Info: record {
## Timestamp for the measurement.
ts: time &log;
## Peer that generated this log. Mostly for clusters.
peer: string &log;
## Amount of memory currently in use in MB.
mem: count &log;
## Number of packets processed since the last stats interval.
pkts_proc: count &log;
## Number of events that have been processed since the last stats interval.
events_proc: count &log;
## Number of events that have been queued since the last stats interval.
events_queued: count &log;
## Lag between the wall clock and packet timestamps if reading live traffic.
lag: interval &log &optional;
## Number of packets received since the last stats interval if reading
## live traffic.
pkts_recv: count &log &optional;
## Number of packets dropped since the last stats interval if reading
## live traffic.
pkts_dropped: count &log &optional;
## Number of packets seen on the link since the last stats interval
## if reading live traffic.
pkts_link: count &log &optional;
};
## Event to catch stats as they are written to the logging stream.
global log_stats: event(rec: Info);
}
event bro_init() &priority=5
{
Log::create_stream(Stats::LOG, [$columns=Info, $ev=log_stats]);
}
event check_stats(last_ts: time, last_ns: NetStats, last_res: bro_resources)
{
local now = current_time();
local ns = net_stats();
local res = resource_usage();
if ( bro_is_terminating() )
# No more stats will be written or scheduled when Bro is
# shutting down.
return;
local info: Info = [$ts=now, $peer=peer_description, $mem=res$mem/1000000,
$pkts_proc=res$num_packets - last_res$num_packets,
$events_proc=res$num_events_dispatched - last_res$num_events_dispatched,
$events_queued=res$num_events_queued - last_res$num_events_queued];
if ( reading_live_traffic() )
{
info$lag = now - network_time();
# Someone's going to have to explain what this is and add a field to the Info record.
# info$util = 100.0*((res$user_time + res$system_time) - (last_res$user_time + last_res$system_time))/(now-last_ts);
info$pkts_recv = ns$pkts_recvd - last_ns$pkts_recvd;
info$pkts_dropped = ns$pkts_dropped - last_ns$pkts_dropped;
info$pkts_link = ns$pkts_link - last_ns$pkts_link;
}
Log::write(Stats::LOG, info);
schedule stats_report_interval { check_stats(now, ns, res) };
}
event bro_init()
{
schedule stats_report_interval { check_stats(current_time(), net_stats(), resource_usage()) };
}
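# As a usage sketch, another script could watch the stats as they are
# written by handling the log_stats event declared above (the printed
# message is illustrative only):
event Stats::log_stats(rec: Stats::Info)
{
if ( rec?$pkts_dropped && rec$pkts_dropped > 0 )
print fmt("%s dropped %d packets in the last interval", rec$peer, rec$pkts_dropped);
}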

View file

@ -10,7 +10,8 @@ export {
## This event can be generated externally to this script if on-demand
## tracefile rotation is required with the caveat that the script doesn't
## currently attempt to get back on schedule automatically and the next
## trim will likely won't happen on the :bro:id:`trim_interval`.
## trim likely won't happen on the
## :bro:id:`TrimTraceFile::trim_interval`.
global go: event(first_trim: bool);
}
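# A minimal sketch of on-demand use: another script can schedule the
# event itself, e.g. to force an early first trim (the 1min delay is
# an arbitrary example):
event bro_init()
{
schedule 1min { TrimTraceFile::go(T) };
}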

View file

@ -8,8 +8,10 @@
module Known;
export {
## The known-hosts logging stream identifier.
redef enum Log::ID += { HOSTS_LOG };
## The record type which contains the column fields of the known-hosts log.
type HostsInfo: record {
## The timestamp at which the host was detected.
ts: time &log;
@ -19,7 +21,7 @@ export {
};
## The hosts whose existence should be logged and tracked.
## Choices are: LOCAL_HOSTS, REMOTE_HOSTS, ALL_HOSTS, NO_HOSTS
## See :bro:type:`Host` for possible choices.
const host_tracking = LOCAL_HOSTS &redef;
## The set of all known addresses to store for preventing duplicate
@ -28,7 +30,9 @@ export {
## Maintain the list of known hosts for 24 hours so that the existence
## of each individual address is logged each day.
global known_hosts: set[addr] &create_expire=1day &synchronized &redef;
## An event that can be handled to access the :bro:type:`Known::HostsInfo`
## record as it is sent on to the logging framework.
global log_known_hosts: event(rec: HostsInfo);
}
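# As a usage sketch (hypothetical choice; LOCAL_HOSTS is the default),
# a site that wants to track every address it sees could widen the
# tracking from local policy:
redef Known::host_tracking = ALL_HOSTS;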

View file

@ -8,29 +8,41 @@
module Known;
export {
## The known-services logging stream identifier.
redef enum Log::ID += { SERVICES_LOG };
## The record type which contains the column fields of the known-services
## log.
type ServicesInfo: record {
## The time at which the service was detected.
ts: time &log;
## The host address on which the service is running.
host: addr &log;
## The port number on which the service is running.
port_num: port &log;
## The transport-layer protocol which the service uses.
port_proto: transport_proto &log;
## A set of protocols that match the service's connection payloads.
service: set[string] &log;
done: bool &default=F;
};
## The hosts whose services should be tracked and logged.
## See :bro:type:`Host` for possible choices.
const service_tracking = LOCAL_HOSTS &redef;
## Tracks the set of daily-detected services for preventing the logging
## of duplicates, but can also be inspected by other scripts for
## different purposes.
global known_services: set[addr, port] &create_expire=1day &synchronized;
## Event that can be handled to access the :bro:type:`Known::ServicesInfo`
## record as it is sent on to the logging framework.
global log_known_services: event(rec: ServicesInfo);
}
redef record connection += {
## This field is to indicate whether or not the processing for detecting
## and logging the service for this connection is complete.
# Indicates whether the processing for detecting and logging the
# service for this connection is complete.
known_services_done: bool &default=F;
};
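# A minimal sketch of consuming the stream: handle the event declared
# above to see services as they are logged (the printed message is
# illustrative only):
event Known::log_known_services(rec: Known::ServicesInfo)
{
print fmt("new %s service on %s:%s", rec$port_proto, rec$host, rec$port_num);
}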

View file

@ -7,7 +7,7 @@ module FTP;
export {
redef enum Notice::Type += {
## This indicates that a successful response to a "SITE EXEC"
## Indicates that a successful response to a "SITE EXEC"
## command/arg pair was seen.
Site_Exec_Success,
};

View file

@ -12,8 +12,10 @@ module FTP;
export {
redef enum Software::Type += {
FTP_CLIENT,
FTP_SERVER,
## Identifier for FTP clients in the software framework.
CLIENT,
## Not currently implemented.
SERVER,
};
}
@ -21,7 +23,7 @@ event ftp_request(c: connection, command: string, arg: string) &priority=4
{
if ( command == "CLNT" )
{
local si = Software::parse(arg, c$id$orig_h, FTP_CLIENT);
local si = Software::parse(arg, c$id$orig_h, CLIENT);
Software::found(c$id, si);
}
}

View file

@ -1,15 +1,18 @@
##! This script takes MD5 sums of files transferred over HTTP and checks them with
##! Team Cymru's Malware Hash Registry (http://www.team-cymru.org/Services/MHR/).
##! Detect file downloads over HTTP that have MD5 sums matching files in Team
##! Cymru's Malware Hash Registry (http://www.team-cymru.org/Services/MHR/).
##! By default, not all file transfers will have MD5 sums calculated. Read the
##! documentation for the :doc:base/protocols/http/file-hash.bro script to see how to
##! configure which transfers will have hashes calculated.
##! documentation for the :doc:`base/protocols/http/file-hash.bro` script to see
##! how to configure which transfers will have hashes calculated.
@load base/frameworks/notice
@load base/protocols/http
module HTTP;
export {
redef enum Notice::Type += {
## If the MD5 sum of a file transferred over HTTP
## The MD5 sum of a file transferred over HTTP matched in the
## malware hash registry.
Malware_Hash_Registry_Match
};
}

View file

@ -1,4 +1,4 @@
##! Intelligence based HTTP detections.
##! Intelligence-based HTTP detections. Not yet working!
@load base/protocols/http/main
@load base/protocols/http/utils

View file

@ -12,12 +12,14 @@ export {
SQL_Injection_Attacker,
## Indicates that a host was seen to have SQL injection attacks against
## it. This is tracked by IP address as opposed to hostname.
SQL_Injection_Attack_Against,
SQL_Injection_Victim,
};
redef enum Metrics::ID += {
SQL_ATTACKER,
SQL_ATTACKS_AGAINST,
## Metric to track SQL injection attackers.
SQLI_ATTACKER,
## Metric to track SQL injection victims.
SQLI_VICTIM,
};
redef enum Tags += {
@ -30,17 +32,17 @@ export {
COOKIE_SQLI,
};
## This defines the threshold that determines if an SQL injection attack
## Defines the threshold that determines if an SQL injection attack
## is ongoing based on the number of requests that appear to be SQL
## injection attacks.
const sqli_requests_threshold = 50 &redef;
## Interval at which to watch for the :bro:id:`sqli_requests_threshold`
## variable to be crossed. At the end of each interval the counter is
## reset.
## Interval at which to watch for the
## :bro:id:`HTTP::sqli_requests_threshold` variable to be crossed.
## At the end of each interval the counter is reset.
const sqli_requests_interval = 5min &redef;
## This regular expression is used to match URI based SQL injections
## Regular expression used to match URI-based SQL injections.
const match_sql_injection_uri =
/[\?&][^[:blank:]\x00-\x37\|]+?=[\-[:alnum:]%]+([[:blank:]\x00-\x37]|\/\*.*?\*\/)*['"]?([[:blank:]\x00-\x37]|\/\*.*?\*\/|\)?;)+.*?([hH][aA][vV][iI][nN][gG]|[uU][nN][iI][oO][nN]|[eE][xX][eE][cC]|[sS][eE][lL][eE][cC][tT]|[dD][eE][lL][eE][tT][eE]|[dD][rR][oO][pP]|[dD][eE][cC][lL][aA][rR][eE]|[cC][rR][eE][aA][tT][eE]|[iI][nN][sS][eE][rR][tT])([[:blank:]\x00-\x37]|\/\*.*?\*\/)+/
| /[\?&][^[:blank:]\x00-\x37\|]+?=[\-0-9%]+([[:blank:]\x00-\x37]|\/\*.*?\*\/)*['"]?([[:blank:]\x00-\x37]|\/\*.*?\*\/|\)?;)+([xX]?[oO][rR]|[nN]?[aA][nN][dD])([[:blank:]\x00-\x37]|\/\*.*?\*\/)+['"]?(([^a-zA-Z&]+)?=|[eE][xX][iI][sS][tT][sS])/
@ -56,14 +58,14 @@ event bro_init() &priority=3
# determine when it looks like an actual attack and how to respond when
# thresholds are crossed.
Metrics::add_filter(SQL_ATTACKER, [$log=F,
Metrics::add_filter(SQLI_ATTACKER, [$log=F,
$notice_threshold=sqli_requests_threshold,
$break_interval=sqli_requests_interval,
$note=SQL_Injection_Attacker]);
Metrics::add_filter(SQL_ATTACKS_AGAINST, [$log=F,
$notice_threshold=sqli_requests_threshold,
$break_interval=sqli_requests_interval,
$note=SQL_Injection_Attack_Against]);
Metrics::add_filter(SQLI_VICTIM, [$log=F,
$notice_threshold=sqli_requests_threshold,
$break_interval=sqli_requests_interval,
$note=SQL_Injection_Victim]);
}
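# Both knobs above are &redef-able, so a busier site could raise them
# from local policy (the values here are hypothetical):
redef HTTP::sqli_requests_threshold = 200;
redef HTTP::sqli_requests_interval = 10min;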
event http_request(c: connection, method: string, original_URI: string,
@ -73,7 +75,7 @@ event http_request(c: connection, method: string, original_URI: string,
{
add c$http$tags[URI_SQLI];
Metrics::add_data(SQL_ATTACKER, [$host=c$id$orig_h], 1);
Metrics::add_data(SQL_ATTACKS_AGAINST, [$host=c$id$resp_h], 1);
Metrics::add_data(SQLI_ATTACKER, [$host=c$id$orig_h], 1);
Metrics::add_data(SQLI_VICTIM, [$host=c$id$resp_h], 1);
}
}

View file

@ -1,3 +1,5 @@
##! Detect and log web applications through the software framework.
@load base/frameworks/signatures
@load base/frameworks/software
@load base/protocols/http
@ -10,10 +12,12 @@ redef Signatures::ignored_ids += /^webapp-/;
export {
redef enum Software::Type += {
## Identifier for web applications in the software framework.
WEB_APPLICATION,
};
redef record Software::Info += {
## The root-most URL where the software was discovered.
url: string &optional &log;
};
}

View file

@ -1,5 +1,5 @@
##! This script take advantage of a few ways that installed plugin information
##! leaks from web browsers.
##! Detect browser plugins as they leak through requests to Omniture
##! advertising servers.
@load base/protocols/http
@load base/frameworks/software
@ -13,6 +13,7 @@ export {
};
redef enum Software::Type += {
## Identifier for browser plugins in the software framework.
BROWSER_PLUGIN
};
}

View file

@ -6,8 +6,11 @@ module HTTP;
export {
redef enum Software::Type += {
## Identifier for web servers in the software framework.
SERVER,
## Identifier for app servers in the software framework.
APPSERVER,
## Identifier for web browsers in the software framework.
BROWSER,
};

View file

@ -1,4 +1,4 @@
##! This script extracts and logs variables from cookies sent by clients
##! Extracts and logs variable names from cookies sent by clients.
@load base/protocols/http/main
@load base/protocols/http/utils
@ -6,6 +6,7 @@
module HTTP;
redef record Info += {
## Variable names extracted from all cookies.
cookie_vars: vector of string &optional &log;
};

View file

@ -1,10 +1,12 @@
##! This script extracts and logs variables from the requested URI
##! Extracts and logs variables from the requested URI into the default HTTP
##! logging stream.
@load base/protocols/http
module HTTP;
redef record Info += {
## Variable names from the URI.
uri_vars: vector of string &optional &log;
};

View file

@ -1,3 +1,5 @@
##! Detect hosts performing password guessing or brute-forcing attacks
##! over SSH.
@load base/protocols/ssh
@load base/frameworks/metrics
@ -9,17 +11,17 @@ module SSH;
export {
redef enum Notice::Type += {
## Indicates that a host has been identified as crossing the
## :bro:id:`password_guesses_limit` threshold with heuristically
## :bro:id:`SSH::password_guesses_limit` threshold with heuristically
## determined failed logins.
Password_Guessing,
## Indicates that a host previously identified as a "password guesser"
## has now had a heuristically successful login attempt.
## has now had a heuristically successful login attempt. This is not
## currently implemented.
Login_By_Password_Guesser,
};
redef enum Metrics::ID += {
## This metric is to measure failed logins with the hope of detecting
## bruteforcing hosts.
## Metric to measure failed logins.
FAILED_LOGIN,
};
@ -37,7 +39,7 @@ export {
## client subnets and the yield value represents server subnets.
const ignore_guessers: table[subnet] of subnet &redef;
## Keeps track of hosts identified as guessing passwords.
## Tracks hosts identified as guessing passwords.
global password_guessers: set[addr]
&read_expire=guessing_timeout+1hr &synchronized &redef;
}
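# As a usage sketch, a known scanner can be exempted through the
# ignore_guessers table described above; the index is the client subnet
# and the yield is the server subnet it may "guess" against (the
# addresses are hypothetical):
redef SSH::ignore_guessers += {
[10.0.0.0/8] = 192.168.0.0/16,
};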

View file

@ -1,5 +1,4 @@
##! This implements all of the additional information and geodata detections
##! for SSH analysis.
##! Geodata based detections for SSH analysis.
@load base/frameworks/notice
@load base/protocols/ssh
@ -19,8 +18,8 @@ export {
remote_location: geo_location &log &optional;
};
## The set of countries for which you'd like to throw notices upon
## successful login
## The set of countries for which you'd like to generate notices upon
## successful login.
const watched_countries: set[string] = {"RO"} &redef;
}
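# The watch list is a plain set of country codes and can be extended
# from local policy (hypothetical additions):
redef SSH::watched_countries += { "CN", "KP" };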

View file

@ -10,9 +10,9 @@ module SSH;
export {
redef enum Notice::Type += {
## Generated if a login originates or responds with a host and the
## Generated if a login originates or responds with a host where the
## reverse hostname lookup resolves to a name matched by the
## :bro:id:`interesting_hostnames` regular expression.
## :bro:id:`SSH::interesting_hostnames` regular expression.
Interesting_Hostname_Login,
};
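# A usage sketch: the hostname pattern is assumed &redef-able, so a
# site could watch for, e.g., bastion-style names (the pattern below
# is hypothetical):
redef SSH::interesting_hostnames = /^bastion\./ | /^vpn\./;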
@ -36,7 +36,9 @@ event SSH::heuristic_successful_login(c: connection)
if ( interesting_hostnames in hostname )
{
NOTICE([$note=Interesting_Hostname_Login,
$msg=fmt("Interesting login from hostname: %s", hostname),
$msg=fmt("Possible SSH login involving a %s %s with an interesting hostname.",
Site::is_local_addr(host) ? "local" : "remote",
host == c$id$orig_h ? "client" : "server"),
$sub=hostname, $conn=c]);
}
}

View file

@ -1,4 +1,4 @@
##! This script extracts SSH client and server information from SSH
##! Extracts SSH client and server information from SSH
##! connections and forwards it to the software framework.
@load base/frameworks/software
@ -7,7 +7,9 @@ module SSH;
export {
redef enum Software::Type += {
## Identifier for SSH servers in the software framework.
SERVER,
## Identifier for SSH clients in the software framework.
CLIENT,
};
}

View file

@ -1,4 +1,4 @@
##! This script calculates MD5 sums for server DER formatted certificates.
##! Calculate MD5 sums for server DER formatted certificates.
@load base/protocols/ssl
@ -6,15 +6,16 @@ module SSL;
export {
redef record Info += {
## MD5 sum of the raw server certificate.
cert_hash: string &log &optional;
};
}
event x509_certificate(c: connection, cert: X509, is_server: bool, chain_idx: count, chain_len: count, der_cert: string) &priority=4
event x509_certificate(c: connection, is_orig: bool, cert: X509, chain_idx: count, chain_len: count, der_cert: string) &priority=4
{
# We aren't tracking client certificates yet and we are also only tracking
# the primary cert. Also verify that this came from an SSL-analyzed session.
if ( ! is_server || chain_idx != 0 || ! c?$ssl )
if ( is_orig || chain_idx != 0 || ! c?$ssl )
return;
c$ssl$cert_hash = md5_hash(der_cert);

View file

@ -1,6 +1,6 @@
##! This script can be used to generate notices when X.509 certificates over
##! SSL/TLS are expired or going to expire based on the date and time values
##! stored within the certificate.
##! Generate notices when X.509 certificates over SSL/TLS are expired or
##! going to expire soon based on the date and time values stored within the
##! certificate.
@load base/protocols/ssl
@load base/frameworks/notice
@ -24,19 +24,21 @@ export {
## The category of hosts you would like to be notified about which have
## certificates that are going to be expiring soon. By default, these
## notices will be suppressed by the notice framework for 1 day.
## notices will be suppressed by the notice framework for 1 day after
## a particular certificate has had a notice generated.
## Choices are: LOCAL_HOSTS, REMOTE_HOSTS, ALL_HOSTS, NO_HOSTS
const notify_certs_expiration = LOCAL_HOSTS &redef;
## The time before a certificate is going to expire that you would like to
## start receiving :bro:enum:`Certificate_Expires_Soon` notices.
## start receiving :bro:enum:`SSL::Certificate_Expires_Soon` notices.
const notify_when_cert_expiring_in = 30days &redef;
}
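# As a usage sketch (hypothetical value; 30 days is the default), a
# site wanting more lead time could widen the notification window:
redef SSL::notify_when_cert_expiring_in = 60days;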
event x509_certificate(c: connection, cert: X509, is_server: bool, chain_idx: count, chain_len: count, der_cert: string) &priority=3
event x509_certificate(c: connection, is_orig: bool, cert: X509, chain_idx: count, chain_len: count, der_cert: string) &priority=3
{
# If this isn't the host cert or we aren't interested in the server, just return.
if ( chain_idx != 0 ||
if ( is_orig ||
chain_idx != 0 ||
! c$ssl?$cert_hash ||
! addr_matches_host(c$id$resp_h, notify_certs_expiration) )
return;

View file

@ -2,7 +2,7 @@
##! after being converted to PEM files. The certificates will be stored in
##! a single file, one for local certificates and one for remote certificates.
##!
##! A couple of things to think about with this script::
##! .. note::
##!
##! - It doesn't work well on a cluster because each worker will write its
##! own certificate files and no duplicate checking is done across
@ -20,15 +20,15 @@
module SSL;
export {
## Setting to control if host certificates offered by the defined hosts
## Control if host certificates offered by the defined hosts
## will be written to the PEM certificates file.
## Choices are: LOCAL_HOSTS, REMOTE_HOSTS, ALL_HOSTS, NO_HOSTS
const extract_certs_pem = LOCAL_HOSTS &redef;
}
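# A usage sketch: to dump certificates offered by all hosts rather
# than only local ones (hypothetical; LOCAL_HOSTS is the default):
redef SSL::extract_certs_pem = ALL_HOSTS;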
## This is an internally maintained variable to prevent relogging of
## certificates that have already been seen. It is indexed on an md5 sum of
## the certificate.
# This is an internally maintained variable to prevent relogging of
# certificates that have already been seen. It is indexed on an md5 sum of
# the certificate.
global extracted_certs: set[string] = set() &read_expire=1hr &redef;
event ssl_established(c: connection) &priority=5

View file

@ -1,5 +1,4 @@
##! This script can be used to log information about certificates while
##! attempting to avoid duplicate logging.
##! Log information about certificates while attempting to avoid duplicate logging.
@load base/utils/directions-and-hosts
@load base/protocols/ssl
@ -36,6 +35,8 @@ export {
## in the set is for storing the DER formatted certificate's MD5 hash.
global certs: set[addr, string] &create_expire=1day &synchronized &redef;
## Event that can be handled to access the loggable record as it is sent
## on to the logging framework.
global log_known_certs: event(rec: CertsInfo);
}
@ -44,10 +45,10 @@ event bro_init() &priority=5
Log::create_stream(Known::CERTS_LOG, [$columns=CertsInfo, $ev=log_known_certs]);
}
event x509_certificate(c: connection, cert: X509, is_server: bool, chain_idx: count, chain_len: count, der_cert: string) &priority=3
event x509_certificate(c: connection, is_orig: bool, cert: X509, chain_idx: count, chain_len: count, der_cert: string) &priority=3
{
# Make sure this is the server cert and we have a hash for it.
if ( chain_idx != 0 || ! c$ssl?$cert_hash )
if ( is_orig || chain_idx != 0 || ! c$ssl?$cert_hash )
return;
local host = c$id$resp_h;

View file

@ -14,8 +14,7 @@ export {
};
redef record Info += {
## This stores and logs the result of certificate validation for
## this connection.
## Result of certificate validation for this connection.
validation_status: string &log &optional;
};

View file

@ -1,9 +1 @@
##! Local site policy loaded only by the manager in a cluster.
@load base/frameworks/notice
# If you are running a cluster you should define your Notice::policy here
# so that notice processing occurs on the manager.
redef Notice::policy += {
};
##! Local site policy loaded only by the manager if Bro is running as a cluster.

View file

@ -1,2 +1 @@
##! Local site policy loaded only by the proxies if Bro is running as a cluster.

View file

@ -1,22 +1,29 @@
##! Local site policy. Customize as appropriate. This file will not be
##! overwritten when upgrading or reinstalling.
##! Local site policy. Customize as appropriate.
##!
##! This file will not be overwritten when upgrading or reinstalling!
# Load the script to log which script were loaded during each run
# This script logs which scripts were loaded during each run.
@load misc/loaded-scripts
# Apply the default tuning scripts for common tuning settings.
@load tuning/defaults
# Vulnerable versions of software to generate notices for when discovered.
# Generate notices when vulnerable versions of software are discovered.
# The default is to only monitor software found in the address space defined
# as "local". Refer to the software framework's documentation for more
# information.
@load frameworks/software/vulnerable
# Example vulnerable software. This needs to be updated and maintained over
# time as new vulnerabilities are discovered.
redef Software::vulnerable_versions += {
["Flash"] = [$major=10,$minor=2,$minor2=153,$addl="1"],
["Java"] = [$major=1,$minor=6,$minor2=0,$addl="22"],
};
# Detect software changing (e.g. attacker installing hacked SSHD).
@load frameworks/software/version-changes
# This adds signatures to detect cleartext forward and reverse windows shells.
redef signature_files += "frameworks/signatures/detect-windows-shells.sig";
@ -25,13 +32,15 @@ redef signature_files += "frameworks/signatures/detect-windows-shells.sig";
# redef Notice::policy += { [$action = Notice::ACTION_ALARM, $priority = 0] };
# Load all of the scripts that detect software in various protocols.
@load protocols/http/software
#@load protocols/http/detect-webapps
@load protocols/ftp/software
@load protocols/smtp/software
@load protocols/ssh/software
@load protocols/http/software
# The detect-webapps script could possibly cause performance trouble when
# running on live traffic. Enable it cautiously.
#@load protocols/http/detect-webapps
# Load the script to detect DNS results pointing toward your Site::local_nets
# This script detects DNS results pointing toward your Site::local_nets
# where the name is not part of your local DNS zone and is being hosted
# externally. Requires that the Site::local_zones variable is defined.
@load protocols/dns/detect-external-names
@ -39,15 +48,12 @@ redef signature_files += "frameworks/signatures/detect-windows-shells.sig";
# Script to detect various activity in FTP sessions.
@load protocols/ftp/detect
# Detect software changing (e.g. attacker installing hacked SSHD).
@load frameworks/software/version-changes
# Scripts that do asset tracking.
@load protocols/conn/known-hosts
@load protocols/conn/known-services
@load protocols/ssl/known-certs
# Load the script to enable SSL/TLS certificate validation.
# This script enables SSL/TLS certificate validation.
@load protocols/ssl/validate-certs
# If you have libGeoIP support built in, do some geographic detections and
@ -60,5 +66,5 @@ redef signature_files += "frameworks/signatures/detect-windows-shells.sig";
# Detect MD5 sums in Team Cymru's Malware Hash Registry.
@load protocols/http/detect-MHR
# Detect SQL injection attacks
# Detect SQL injection attacks.
@load protocols/http/detect-sqli

View file

@ -26,6 +26,7 @@
@load misc/capture-loss.bro
@load misc/loaded-scripts.bro
@load misc/profiling.bro
@load misc/stats.bro
@load misc/trim-trace-file.bro
@load protocols/conn/known-hosts.bro
@load protocols/conn/known-services.bro