Merge remote-tracking branch 'origin/master' into topic/bif_cleanup

Conflicts:
	src/bro.bif
Matthias Vallentin 2012-01-18 14:56:53 -08:00
commit 6c255d13ff
247 changed files with 6038 additions and 3503 deletions

View file

@ -44,7 +44,7 @@ event bro_init() &priority=9
{
if ( n$node_type == WORKER && n$proxy == node )
Communication::nodes[i] =
[$host=n$ip, $connect=F, $class=i, $events=worker2proxy_events];
[$host=n$ip, $connect=F, $class=i, $sync=T, $auth=T, $events=worker2proxy_events];
# accepts connections from the previous one.
# (This is not ideal for setups with many proxies)

View file

@ -1,43 +1,30 @@
##! This is a utility script that sends the current values of all &redef'able
##! consts to a remote Bro then sends the :bro:id:`configuration_update` event
##! and terminates processing.
##!
##! Intended to be used from the command line like this when starting a controller::
##!
##! bro <scripts> frameworks/control/controller Control::host=<host_addr> Control::host_port=<host_port> Control::cmd=<command> [Control::arg=<arg>]
##!
##! A controllee only needs to load the controllee script in addition
##! to the specific analysis scripts desired. It may also need a node
##! configured as a controller node in the communications nodes configuration::
##!
##! bro <scripts> frameworks/control/controllee
##!
##! To use the framework as a controllee, it only needs to be loaded and
##! the controlled node need to accept all events in the "Control::" namespace
##! from the host where the control actions will be performed from along with
##! using the "control" class.
##! The control framework provides the foundation for providing "commands"
##! that can be taken remotely at runtime to modify a running Bro instance
##! or collect information from the running instance.
module Control;
export {
## This is the address of the host that will be controlled.
## The address of the host that will be controlled.
const host = 0.0.0.0 &redef;
## This is the port of the host that will be controlled.
## The port of the host that will be controlled.
const host_port = 0/tcp &redef;
## This is the command that is being done. It's typically set on the
## command line and influences whether this instance starts up as a
## controller or controllee.
## The command that is being run. It's typically set on the
## command line.
const cmd = "" &redef;
## This can be used by commands that take an argument.
const arg = "" &redef;
## Events that need to be handled by controllers.
const controller_events = /Control::.*_request/ &redef;
## Events that need to be handled by controllees.
const controllee_events = /Control::.*_response/ &redef;
## These are the commands that can be given on the command line for
## The commands that can currently be given on the command line for
## remote control.
const commands: set[string] = {
"id_value",
@ -45,15 +32,15 @@ export {
"net_stats",
"configuration_update",
"shutdown",
};
} &redef;
## Variable IDs that are to be ignored by the update process.
const ignore_ids: set[string] = {
};
const ignore_ids: set[string] = { };
## Event for requesting the value of an ID (a variable).
global id_value_request: event(id: string);
## Event for returning the value of an ID after an :bro:id:`id_request` event.
## Event for returning the value of an ID after an
## :bro:id:`Control::id_value_request` event.
global id_value_response: event(id: string, val: string);
## Requests the current communication status.
@ -68,7 +55,8 @@ export {
## Inform the remote Bro instance that its configuration may have been updated.
global configuration_update_request: event();
## This event is a wrapper and alias for the :bro:id:`configuration_update_request` event.
## This event is a wrapper and alias for the
## :bro:id:`Control::configuration_update_request` event.
## This event is also a primary hooking point for the control framework.
global configuration_update: event();
## Message in response to a configuration update request.
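
As an illustration of the command-line usage described in this script's header, requesting the value of an ID from a running controllee might look like this (the address, port, and target ID are hypothetical)::

    bro <scripts> frameworks/control/controller Control::host=127.0.0.1 \
        Control::host_port=47757/tcp Control::cmd=id_value Control::arg=Notice::mail_dest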

View file

@ -80,15 +80,15 @@ signature irc_server_reply {
tcp-state responder
}
signature irc_sig3 {
signature irc_server_to_server1 {
ip-proto == tcp
payload /(.*\x0a)*(\x20)*[Ss][Ee][Rr][Vv][Ee][Rr](\x20)+.+\x0a/
payload /(|.*[\r\n]) *[Ss][Ee][Rr][Vv][Ee][Rr] +[^ ]+ +[0-9]+ +:.+[\r\n]/
}
signature irc_sig4 {
signature irc_server_to_server2 {
ip-proto == tcp
payload /(.*\x0a)*(\x20)*[Ss][Ee][Rr][Vv][Ee][Rr](\x20)+.+\x0a/
requires-reverse-signature irc_sig3
payload /(|.*[\r\n]) *[Ss][Ee][Rr][Vv][Ee][Rr] +[^ ]+ +[0-9]+ +:.+[\r\n]/
requires-reverse-signature irc_server_to_server1
enable "irc"
}
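
For reference, the tightened server-to-server patterns above would match a handshake line such as ``SERVER irc.example.net 1 :An example server`` terminated by CRLF (hostname and description are hypothetical).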

View file

@ -11,7 +11,7 @@
# user_name
# file_name
# file_md5
# x509_cert - DER encoded, not PEM (ascii armored)
# x509_md5
# Example tags:
# infrastructure
@ -25,6 +25,7 @@
module Intel;
export {
## The intel logging stream identifier.
redef enum Log::ID += { LOG };
redef enum Notice::Type += {
@ -33,72 +34,117 @@ export {
Detection,
};
## Record type used for logging information from the intelligence framework.
## Primarily for problems or oddities with inserting and querying data.
## This is important since the content of the intelligence framework can
## change quite dramatically during runtime and problems may be introduced
## into the data.
type Info: record {
## The current network time.
ts: time &log;
## Represents the severity of the message.
## This value should be one of: "info", "warn", "error"
level: string &log;
## The message.
message: string &log;
};
## Record to represent metadata associated with a single piece of
## intelligence.
type MetaData: record {
## A description for the data.
desc: string &optional;
## A URL where more information may be found about the intelligence.
url: string &optional;
## The time at which the data was first declared to be intelligence.
first_seen: time &optional;
## When this data was most recently inserted into the framework.
latest_seen: time &optional;
## Arbitrary text tags for the data.
tags: set[string];
};
## Record to represent a singular piece of intelligence.
type Item: record {
## If the data is an IP address, this holds the address.
ip: addr &optional;
## If the data is textual, this holds the text.
str: string &optional;
## If the data is numeric, this holds the number.
num: int &optional;
## The subtype of the data for when either the $str or $num fields are
## given. If one of those fields is given, this field must be present.
subtype: string &optional;
## The next five fields are temporary until a better model for
## attaching metadata to an intelligence item is created.
desc: string &optional;
url: string &optional;
first_seen: time &optional;
latest_seen: time &optional;
tags: set[string];
## These single string tags are throw away until pybroccoli supports sets
## These single string tags are throw away until pybroccoli supports sets.
tag1: string &optional;
tag2: string &optional;
tag3: string &optional;
};
## Record model used for constructing queries against the intelligence
## framework.
type QueryItem: record {
ip: addr &optional;
str: string &optional;
num: int &optional;
subtype: string &optional;
## If an IP address is being queried for, this field should be given.
ip: addr &optional;
## If a string is being queried for, this field should be given.
str: string &optional;
## If numeric data is being queried for, this field should be given.
num: int &optional;
## If either a string or number is being queried for, this field should
## indicate the subtype of the data.
subtype: string &optional;
or_tags: set[string] &optional;
and_tags: set[string] &optional;
## A set of tags where if a single metadata record attached to an item
## has any one of the tags defined in this field, it will match.
or_tags: set[string] &optional;
## A set of tags where a single metadata record attached to an item
## must have all of the tags defined in this field.
and_tags: set[string] &optional;
## The predicate can be given when searching for a match. It will
## be tested against every :bro:type:`MetaData` item associated with
## the data being matched on. If it returns T a single time, the
## matcher will consider that the item has matched.
pred: function(meta: Intel::MetaData): bool &optional;
## be tested against every :bro:type:`Intel::MetaData` item associated
## with the data being matched on. If it returns T a single time, the
## matcher will consider that the item has matched. This field can
## be used for constructing arbitrarily complex queries that may not
## be possible with the $or_tags or $and_tags fields.
pred: function(meta: Intel::MetaData): bool &optional;
};
## Function to insert data into the intelligence framework.
##
## item: The data item.
##
## Returns: T if the data was successfully inserted into the framework,
## otherwise it returns F.
global insert: function(item: Item): bool;
## A wrapper for the :bro:id:`Intel::insert` function. This is primarily
## used as the external API for inserting data into the intelligence
## framework using Broccoli.
global insert_event: event(item: Item);
## Function for matching data within the intelligence framework.
global matcher: function(item: QueryItem): bool;
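
As a sketch of how the insert and matcher functions documented above fit together, using the example tag "infrastructure" from this file's header (the address is hypothetical)::

    event bro_init()
        {
        Intel::insert([$ip=192.0.2.1, $tags=set("infrastructure")]);
        local q: Intel::QueryItem = [$ip=192.0.2.1, $or_tags=set("infrastructure")];
        if ( Intel::matcher(q) )
            print "intel match";
        }
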
type MetaDataStore: table[count] of MetaData;
type DataStore: record {
ip_data: table[addr] of MetaDataStore;
## The first string is the actual value and the second string is the subtype.
string_data: table[string, string] of MetaDataStore;
int_data: table[int, string] of MetaDataStore;
};
global data_store: DataStore;
}
type MetaDataStore: table[count] of MetaData;
type DataStore: record {
ip_data: table[addr] of MetaDataStore;
# The first string is the actual value and the second string is the subtype.
string_data: table[string, string] of MetaDataStore;
int_data: table[int, string] of MetaDataStore;
};
global data_store: DataStore;
event bro_init()
{
Log::create_stream(Intel::LOG, [$columns=Info]);

View file

@ -1 +1,2 @@
@load ./scp
@load ./sftp

View file

@ -47,6 +47,10 @@ export {
## copy of the rotated-log to each destination in the set. This
## table can be modified at run-time.
global scp_destinations: table[Writer, string] of set[SCPDestination];
## Default naming format for timestamps embedded into log filenames
## that use the SCP rotator.
const scp_rotation_date_format = "%Y-%m-%d-%H-%M-%S" &redef;
}
function scp_postprocessor(info: Log::RotationInfo): bool
@ -56,7 +60,11 @@ function scp_postprocessor(info: Log::RotationInfo): bool
local command = "";
for ( d in scp_destinations[info$writer, info$path] )
command += fmt("scp %s %s@%s:%s;", info$fname, d$user, d$host, d$path);
{
local dst = fmt("%s/%s.%s.log", d$path, info$path,
strftime(Log::scp_rotation_date_format, info$open));
command += fmt("scp %s %s@%s:%s;", info$fname, d$user, d$host, dst);
}
command += fmt("/bin/rm %s", info$fname);
system(command);
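
With the default ``scp_rotation_date_format``, a log whose path is "conn" and whose rotation window opened at 2012-01-18 14:56:53 would thus arrive at each destination as ``<path>/conn.2012-01-18-14-56-53.log``.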

View file

@ -0,0 +1,73 @@
##! This script defines a postprocessing function that can be applied
##! to a logging filter in order to automatically SFTP
##! a log stream (or a subset of it) to a remote host at configurable
##! rotation time intervals. Generally, to use this functionality
##! you must handle the :bro:id:`bro_init` event and do the following
##! in your handler:
##!
##! 1) Create a new :bro:type:`Log::Filter` record that defines a name/path,
##! rotation interval, and set the ``postprocessor`` to
##! :bro:id:`Log::sftp_postprocessor`.
##! 2) Add the filter to a logging stream using :bro:id:`Log::add_filter`.
##! 3) Add a table entry to :bro:id:`Log::sftp_destinations` for the filter's
##! writer/path pair which defines a set of :bro:type:`Log::SFTPDestination`
##! records.
module Log;
export {
## Securely transfers the rotated-log to all the remote hosts
## defined in :bro:id:`Log::sftp_destinations` and then deletes
## the local copy of the rotated-log. It's not active when
## reading from trace files.
##
## info: A record holding meta-information about the log file to be
## postprocessed.
##
## Returns: True if the sftp system command was initiated or
## if no destination was configured for the log as described
## by *info*.
global sftp_postprocessor: function(info: Log::RotationInfo): bool;
## A container that describes the remote destination for the SFTP command,
## comprised of the username, host, and path at which to upload the file.
type SFTPDestination: record {
## The remote user to log in as. A trust mechanism should be
## pre-established.
user: string;
## The remote host to which to transfer logs.
host: string;
## The path/directory on the remote host to send logs.
path: string;
};
## A table indexed by a particular log writer and filter path that yields
## a set of remote destinations. The :bro:id:`Log::sftp_postprocessor`
## function queries this table upon log rotation and performs a secure
## transfer of the rotated-log to each destination in the set. This
## table can be modified at run-time.
global sftp_destinations: table[Writer, string] of set[SFTPDestination];
## Default naming format for timestamps embedded into log filenames
## that use the SFTP rotator.
const sftp_rotation_date_format = "%Y-%m-%d-%H-%M-%S" &redef;
}
function sftp_postprocessor(info: Log::RotationInfo): bool
{
if ( reading_traces() || [info$writer, info$path] !in sftp_destinations )
return T;
local command = "";
for ( d in sftp_destinations[info$writer, info$path] )
{
local dst = fmt("%s/%s.%s.log", d$path, info$path,
strftime(Log::sftp_rotation_date_format, info$open));
command += fmt("echo put %s %s | sftp -b - %s@%s;", info$fname, dst,
d$user, d$host);
}
command += fmt("/bin/rm %s", info$fname);
system(command);
return T;
}
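
A minimal sketch of the three steps from this script's header (the stream, filter name, rotation interval, and destination values are hypothetical)::

    event bro_init()
        {
        Log::add_filter(Conn::LOG, [$name="conn-sftp", $path="conn-sftp",
                                    $interv=1hr,
                                    $postprocessor=Log::sftp_postprocessor]);
        Log::sftp_destinations[Log::WRITER_ASCII, "conn-sftp"] =
            set([$user="bro", $host="archive.example.com", $path="/logs"]);
        }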

View file

@ -21,8 +21,9 @@ export {
## Separator between set elements.
const set_separator = "," &redef;
## String to use for empty fields.
const empty_field = "-" &redef;
## String to use for empty fields. This should be different from
## *unset_field* to make the output unambiguous.
const empty_field = "(empty)" &redef;
## String to use for an unset &optional field.
const unset_field = "-" &redef;

View file

@ -13,11 +13,11 @@
module Metrics;
export {
## This value allows a user to decide how large of result groups the
## workers should transmit values.
## Allows a user to decide how large the result groups are in which
## workers transmit values for cluster metric aggregation.
const cluster_send_in_groups_of = 50 &redef;
## This is the percent of the full threshold value that needs to be met
## The percent of the full threshold value that needs to be met
## on a single worker for that worker to send the value to its manager in
## order for it to request a global view for that value. There is no
## requirement that the manager requests a global view for the index
@ -25,11 +25,11 @@ export {
## recently.
const cluster_request_global_view_percent = 0.1 &redef;
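
For example, with this value at its default of 0.1 and a filter threshold of 100, a worker that reaches a local count of 10 for some index would send that value to its manager, which may then request a global view.
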
## This event is sent by the manager in a cluster to initiate the
## Event sent by the manager in a cluster to initiate the
## collection of metrics values for a filter.
global cluster_filter_request: event(uid: string, id: ID, filter_name: string);
## This event is sent by nodes that are collecting metrics after receiving
## Event sent by nodes that are collecting metrics after receiving
## a request for the metric filter from the manager.
global cluster_filter_response: event(uid: string, id: ID, filter_name: string, data: MetricTable, done: bool);
@ -40,12 +40,12 @@ export {
global cluster_index_request: event(uid: string, id: ID, filter_name: string, index: Index);
## This event is sent by nodes in response to a
## :bro:id:`cluster_index_request` event.
## :bro:id:`Metrics::cluster_index_request` event.
global cluster_index_response: event(uid: string, id: ID, filter_name: string, index: Index, val: count);
## This is sent by workers to indicate that they have crossed the percentage
## of the current threshold defined globally in
## :bro:id:`cluster_request_global_view_percent`
## :bro:id:`Metrics::cluster_request_global_view_percent`
global cluster_index_intermediate_response: event(id: Metrics::ID, filter_name: string, index: Metrics::Index, val: count);
## This event is scheduled internally on workers to send result chunks.

View file

@ -1,13 +1,16 @@
##! This is the implementation of the metrics framework.
##! The metrics framework provides a way to count and measure data.
@load base/frameworks/notice
module Metrics;
export {
## The metrics logging stream identifier.
redef enum Log::ID += { LOG };
## Identifiers for metrics to collect.
type ID: enum {
## Blank placeholder value.
NOTHING,
};
@ -15,10 +18,13 @@ export {
## current value to the logging stream.
const default_break_interval = 15mins &redef;
## This is the interval for how often notices will happen after they have
## already fired.
## This is the interval for how often threshold-based notices will happen
## after they have already fired.
const renotice_interval = 1hr &redef;
## Represents a thing which is having metrics collected for it. An instance
## of this record type and a :bro:type:`Metrics::ID` together represent a
## single measurement.
type Index: record {
## Host is the value to which this metric applies.
host: addr &optional;
@ -37,17 +43,30 @@ export {
network: subnet &optional;
} &log;
## The record type that is used for logging metrics.
type Info: record {
## Timestamp at which the metric was "broken".
ts: time &log;
## What measurement the metric represents.
metric_id: ID &log;
## The name of the filter being logged. :bro:type:`Metrics::ID` values
## can have multiple filters which represent different perspectives on
## the data so this is necessary to understand the value.
filter_name: string &log;
## What the metric value applies to.
index: Index &log;
## The simple numeric value of the metric.
value: count &log;
};
# TODO: configure a metrics filter logging stream to log the current
# metrics configuration in case someone is looking through
# old logs and the configuration has changed since then.
## Filters define how the data from a metric is aggregated and handled.
## Filters can be used to set how often the measurements are cut or "broken"
## and logged or how the data within them is aggregated. It's also
## possible to disable logging and use filters for thresholding.
type Filter: record {
## The :bro:type:`Metrics::ID` that this filter applies to.
id: ID &optional;
@ -62,7 +81,7 @@ export {
aggregation_mask: count &optional;
## This is essentially a mapping table between addresses and subnets.
aggregation_table: table[subnet] of subnet &optional;
## The interval at which the metric should be "broken" and written
## The interval at which this filter should be "broken" and written
## to the logging stream. The counters are also reset to zero at
## this time so any threshold based detection needs to be set to a
## number that should be expected to happen within this period.
@ -79,7 +98,7 @@ export {
notice_threshold: count &optional;
## A series of thresholds at which to generate notices.
notice_thresholds: vector of count &optional;
## How often this notice should be raised for this metric index. It
## How often this notice should be raised for this filter. It
## will be generated every time it crosses a threshold, but if the
## $break_interval is set to 5mins and this is set to 1hr, the notice
## will only be generated once per hour even if something crosses the
@ -87,15 +106,43 @@ export {
notice_freq: interval &optional;
};
## Function to associate a metric filter with a metric ID.
##
## id: The metric ID that the filter should be associated with.
##
## filter: The record representing the filter configuration.
global add_filter: function(id: ID, filter: Filter);
## Add data into a :bro:type:`Metrics::ID`. This should be called when
## a script has measured some point value and is ready to increment the
## counters.
##
## id: The metric ID that the data represents.
##
## index: The metric index that the value is to be added to.
##
## increment: How much to increment the counter by.
global add_data: function(id: ID, index: Index, increment: count);
## Helper function to represent a :bro:type:`Metrics::Index` value as
## a simple string.
##
## index: The metric index that is to be converted into a string.
##
## Returns: A string representation of the metric index.
global index2str: function(index: Index): string;
# This is the event that is used to "finish" metrics and adapt the metrics
# framework for clustered or non-clustered usage.
## Event that is used to "finish" metrics and adapt the metrics
## framework for clustered or non-clustered usage.
##
## .. note:: This is primarily intended for internal use.
global log_it: event(filter: Filter);
## Event to access metrics records as they are passed to the logging framework.
global log_metrics: event(rec: Info);
## Type to store a table of metrics values. Internal use only!
type MetricTable: table[Index] of count &default=0;
}
redef record Notice::Info += {
@ -105,7 +152,6 @@ redef record Notice::Info += {
global metric_filters: table[ID] of vector of Filter = table();
global filter_store: table[ID, string] of Filter = table();
type MetricTable: table[Index] of count &default=0;
# This is indexed by metric ID and stream filter name.
global store: table[ID, string] of MetricTable = table() &default=table();
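
As an illustration of the add_filter/add_data API documented above, a hypothetical metric counting connection attempts per originating host might be wired up like this (the ID value, filter name, and interval are invented for the example)::

    redef enum Metrics::ID += { CONN_ATTEMPTS };

    event bro_init()
        {
        Metrics::add_filter(CONN_ATTEMPTS, [$name="all", $break_interval=5mins]);
        }

    event connection_attempt(c: connection)
        {
        Metrics::add_data(CONN_ATTEMPTS, [$host=c$id$orig_h], 1);
        }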

View file

@ -31,6 +31,7 @@ export {
## Add a helper to the notice policy for looking up GeoIP data.
redef Notice::policy += {
[$pred(n: Notice::Info) = { return (n$note in Notice::lookup_location_types); },
$action = ACTION_ADD_GEODATA,
$priority = 10],
};
}

View file

@ -11,8 +11,8 @@ module Notice;
export {
redef enum Action += {
## Indicate that the generated email should be addressed to the
## appropriate email addresses as found in the
## :bro:id:`Site::addr_to_emails` variable based on the relevant
## appropriate email addresses as found by the
## :bro:id:`Site::get_emails` function based on the relevant
## address or addresses indicated in the notice.
ACTION_EMAIL_ADMIN
};

View file

@ -7,7 +7,7 @@ module Notice;
export {
redef enum Action += {
## Indicates that the notice should be sent to the pager email address
## configured in the :bro:id:`mail_page_dest` variable.
## configured in the :bro:id:`Notice::mail_page_dest` variable.
ACTION_PAGE
};

View file

@ -10,18 +10,21 @@ module Notice;
export {
## Activate pretty-printed alarm summaries.
const pretty_print_alarms = T &redef;
## Address to send the pretty-printed reports to. If not set, the default
## is :bro:id:`Notice::mail_dest`.
const mail_dest_pretty_printed = "" &redef;
## If an address from one of these networks is reported, we mark
## the entry with an addition quote symbol (that is, ">"). Many MUAs
## the entry with an additional quote symbol (i.e., ">"). Many MUAs
## then highlight such lines differently.
global flag_nets: set[subnet] &redef;
## Function that renders a single alarm. Can be overridden.
global pretty_print_alarm: function(out: file, n: Info) &redef;
## Force generating mail file, even if reading from traces or no mail
## destination is defined. This is mainly for testing.
global force_email_summaries = F &redef;
}
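
A redef sketch for directing the summaries to a dedicated mailbox (the address is hypothetical)::

    redef Notice::mail_dest_pretty_printed = "alarms@example.com";
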
# We maintain an old-style file recording the pretty-printed alarms.
@ -32,6 +35,9 @@ global pp_alarms_open: bool = F;
# Returns True if pretty-printed alarm summaries are activated.
function want_pp() : bool
{
if ( force_email_summaries )
return T;
return (pretty_print_alarms && ! reading_traces()
&& (mail_dest != "" || mail_dest_pretty_printed != ""));
}
@ -41,38 +47,49 @@ function pp_open()
{
if ( pp_alarms_open )
return;
pp_alarms_open = T;
pp_alarms = open(pp_alarms_name);
local dest = mail_dest_pretty_printed != "" ? mail_dest_pretty_printed
: mail_dest;
local headers = email_headers("Alarm summary", dest);
write_file(pp_alarms, headers + "\n");
}
# Closes and mails out the current output file.
function pp_send()
function pp_send(rinfo: Log::RotationInfo)
{
if ( ! pp_alarms_open )
return;
write_file(pp_alarms, "\n\n--\n[Automatically generated]\n\n");
close(pp_alarms);
system(fmt("/bin/cat %s | %s -t -oi && /bin/rm %s",
pp_alarms_name, sendmail, pp_alarms_name));
pp_alarms_open = F;
local from = strftime("%H:%M:%S", rinfo$open);
local to = strftime("%H:%M:%S", rinfo$close);
local subject = fmt("Alarm summary from %s-%s", from, to);
local dest = mail_dest_pretty_printed != "" ? mail_dest_pretty_printed
: mail_dest;
if ( dest == "" )
# No mail destination configured, just leave the file alone. This is mainly for
# testing.
return;
local headers = email_headers(subject, dest);
local header_name = pp_alarms_name + ".tmp";
local header = open(header_name);
write_file(header, headers + "\n");
close(header);
system(fmt("/bin/cat %s %s | %s -t -oi && /bin/rm -f %s %s",
header_name, pp_alarms_name, sendmail, header_name, pp_alarms_name));
}
# Postprocessor function that triggers the email.
function pp_postprocessor(info: Log::RotationInfo): bool
{
if ( want_pp() )
pp_send();
pp_send(info);
return T;
}
@ -80,7 +97,7 @@ event bro_init()
{
if ( ! want_pp() )
return;
# This replaces the standard non-pretty-printing filter.
Log::add_filter(Notice::ALARM_LOG,
[$name="alarm-mail", $writer=Log::WRITER_NONE,
@ -92,13 +109,13 @@ event notice(n: Notice::Info) &priority=-5
{
if ( ! want_pp() )
return;
if ( ACTION_LOG !in n$actions )
if ( ACTION_ALARM !in n$actions )
return;
if ( ! pp_alarms_open )
pp_open();
pretty_print_alarm(pp_alarms, n);
}
@ -108,12 +125,12 @@ function do_msg(out: file, n: Info, line1: string, line2: string, line3: string,
@ifdef ( Notice::ACTION_ADD_GEODATA ) # Make tests happy, cyclic dependency.
if ( n?$remote_location && n$remote_location?$country_code )
country = fmt(" (remote location %s)", n$remote_location$country_code);
@endif
line1 = cat(line1, country);
local resolved = "";
if ( host1 != 0.0.0.0 )
resolved = fmt("%s # %s = %s", resolved, host1, name1);
@ -133,64 +150,64 @@ function do_msg(out: file, n: Info, line1: string, line2: string, line3: string,
function pretty_print_alarm(out: file, n: Info)
{
local pdescr = "";
@if ( Cluster::is_enabled() )
pdescr = "local";
if ( n?$src_peer )
pdescr = n$src_peer?$descr ? n$src_peer$descr : fmt("%s", n$src_peer$host);
pdescr = fmt("<%s> ", pdescr);
@endif
local msg = fmt( "%s%s", pdescr, n$msg);
local who = "";
local h1 = 0.0.0.0;
local h2 = 0.0.0.0;
local orig_p = "";
local resp_p = "";
if ( n?$id )
{
orig_p = fmt(":%s", n$id$orig_p);
resp_p = fmt(":%s", n$id$resp_p);
h1 = n$id$orig_h;
h2 = n$id$resp_h;
who = fmt("%s:%s -> %s:%s", h1, n$id$orig_p, h2, n$id$resp_p);
}
if ( n?$src && n?$dst )
else if ( n?$src && n?$dst )
{
h1 = n$src;
h2 = n$dst;
who = fmt("%s%s -> %s%s", h1, orig_p, h2, resp_p);
if ( n?$uid )
who = fmt("%s (uid %s)", who, n$uid );
who = fmt("%s -> %s", h1, h2);
}
else if ( n?$src )
{
local p = "";
if ( n?$p )
p = fmt(":%s", n$p);
h1 = n$src;
who = fmt("%s%s", h1, p);
who = fmt("%s%s", h1, (n?$p ? fmt(":%s", n$p) : ""));
}
if ( n?$uid )
who = fmt("%s (uid %s)", who, n$uid );
local flag = (h1 in flag_nets || h2 in flag_nets);
local line1 = fmt(">%s %D %s %s", (flag ? ">" : " "), network_time(), n$note, who);
local line2 = fmt(" %s", msg);
local line3 = n?$sub ? fmt(" %s", n$sub) : "";
if ( h1 == 0.0.0.0 )
{
do_msg(out, n, line1, line2, line3, h1, "", h2, "");
return;
}
if ( reading_traces() )
{
do_msg(out, n, line1, line2, line3, h1, "<skipped>", h2, "<skipped>");
return;
}
when ( local h1name = lookup_addr(h1) )
{
if ( h2 == 0.0.0.0 )

View file

@ -1,32 +1,52 @@
##! Loading this script extends the :bro:enum:`Notice::ACTION_EMAIL` action
##! by appending to the email the hostnames associated with
##! :bro:type:`Notice::Info`'s *src* and *dst* fields as determined by a
##! DNS lookup.
@load ../main
module Notice;
# This probably doesn't actually work due to the async lookup_addr.
# We have to store references to the notices here because the when statement
# clones the frame which doesn't give us access to modify values outside
# of its execution scope. (we get a clone of the notice instead of a
# reference to the original notice)
global tmp_notice_storage: table[string] of Notice::Info &create_expire=max_email_delay+10secs;
event Notice::notice(n: Notice::Info) &priority=10
{
if ( ! n?$src && ! n?$dst )
return;
# This should only be done for notices that are being sent to email.
if ( ACTION_EMAIL !in n$actions )
return;
# I'm not recovering gracefully from the when statements because I want
# the notice framework to detect that something has exceeded the maximum
# allowed email delay and tell the user.
local uid = unique_id("");
tmp_notice_storage[uid] = n;
local output = "";
if ( n?$src )
{
add n$email_delay_tokens["hostnames-src"];
when ( local src_name = lookup_addr(n$src) )
{
output = string_cat("orig_h/src hostname: ", src_name, "\n");
n$email_body_sections[|n$email_body_sections|] = output;
output = string_cat("orig/src hostname: ", src_name, "\n");
tmp_notice_storage[uid]$email_body_sections[|tmp_notice_storage[uid]$email_body_sections|] = output;
delete tmp_notice_storage[uid]$email_delay_tokens["hostnames-src"];
}
}
if ( n?$dst )
{
add n$email_delay_tokens["hostnames-dst"];
when ( local dst_name = lookup_addr(n$dst) )
{
output = string_cat("resp_h/dst hostname: ", dst_name, "\n");
n$email_body_sections[|n$email_body_sections|] = output;
output = string_cat("resp/dst hostname: ", dst_name, "\n");
tmp_notice_storage[uid]$email_body_sections[|tmp_notice_storage[uid]$email_body_sections|] = output;
delete tmp_notice_storage[uid]$email_delay_tokens["hostnames-dst"];
}
}
}

View file

@ -7,9 +7,9 @@
module Notice;
export {
redef enum Log::ID += {
## This is the primary logging stream for notices.
LOG,
## This is the notice policy auditing log. It records what the current
## notice policy is at Bro init time.
POLICY_LOG,
@ -17,7 +17,7 @@ export {
ALARM_LOG,
};
## Scripts creating new notices need to redef this enum to add their own
## specific notice types which would then get used when they call the
## :bro:id:`NOTICE` function. The convention is to give a general category
## along with the specific notice separating words with underscores and
@ -28,14 +28,14 @@ export {
## Notice reporting a count of how often a notice occurred.
Tally,
};
## These are values representing actions that can be taken with notices.
type Action: enum {
## Indicates that there is no action to be taken.
ACTION_NONE,
## Indicates that the notice should be sent to the notice logging stream.
ACTION_LOG,
## Indicates that the notice should be sent to the email address(es)
## configured in the :bro:id:`Notice::mail_dest` variable.
ACTION_EMAIL,
## Indicates that the notice should be alarmed. A readable ASCII
@ -46,12 +46,12 @@ export {
## duplicate notice suppression that the notice framework does.
ACTION_NO_SUPPRESS,
};
## The notice framework is able to do automatic notice suppression by
## utilizing the $identifier field in :bro:type:`Notice::Info` records.
## Set this to "0secs" to completely disable automated notice suppression.
const default_suppression_interval = 1hrs &redef;
type Info: record {
## An absolute time indicating when the notice occurred, defaults
## to the current network time.
@ -73,14 +73,18 @@ export {
## reference to the actual connection will be deleted after applying
## the notice policy.
iconn: icmp_conn &optional;
## The type of the notice.
## The transport protocol. Filled automatically when either conn, iconn
## or p is specified.
proto: transport_proto &log &optional;
## The :bro:type:`Notice::Type` of the notice.
note: Type &log;
## The human readable message for the notice.
msg: string &log &optional;
## The human readable sub-message.
sub: string &log &optional;
## Source address, if we don't have a :bro:type:`conn_id`.
src: addr &log &optional;
## Destination address.
@ -89,33 +93,39 @@ export {
p: port &log &optional;
## Associated count, or perhaps a status code.
n: count &log &optional;
## Peer that raised this notice.
src_peer: event_peer &optional;
## Textual description for the peer that raised this notice.
peer_descr: string &log &optional;
## The actions which have been applied to this notice.
actions: set[Notice::Action] &log &optional;
## These are policy items that returned T and applied their action
## to the notice.
policy_items: set[count] &log &optional;
## By adding chunks of text into this element, other scripts can
## expand on notices that are being emailed. The normal way to add text
## is to extend the vector by handling the :bro:id:`Notice::notice`
## event and modifying the notice in place.
email_body_sections: vector of string &default=vector();
email_body_sections: vector of string &optional;
## Adding a string "token" to this set will cause the notice framework's
## built-in emailing functionality to delay sending the email until
## either the token has been removed or the email has been delayed
## for :bro:id:`Notice::max_email_delay`.
email_delay_tokens: set[string] &optional;
## This field is to be provided when a notice is generated for the
## purpose of deduplicating notices. The identifier string should
## be unique for a single instance of the notice. This field should be
## filled out in almost all cases when generating notices to define
## when a notice is conceptually a duplicate of a previous notice.
##
## For example, an SSL certificate that is going to expire soon should
## always have the same identifier no matter the client IP address
## that connected and resulted in the certificate being exposed. In
## this case, the resp_h, resp_p, and hash of the certificate would be
## used to create this value. The hash of the cert is included
@ -124,19 +134,19 @@ export {
## Another example might be a host downloading a file which triggered
## a notice because the MD5 sum of the file it downloaded was known
## by some set of intelligence. In that case, the orig_h (client)
## and MD5 sum would be used in this field to dedup because if the
## same file is downloaded over and over again you really only want to
## know about it a single time. This makes it possible to send those
## notices to email without worrying so much about sending thousands
## of emails.
identifier: string &optional;
## This field indicates the length of time that this
## unique notice should be suppressed. This field is automatically
## filled out and should not be written to by any other script.
suppress_for: interval &log &optional;
};
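
As a sketch of the deduplication scheme described for the *identifier* field above, a certificate-expiry notice raised from inside a handler might be built from the responder endpoint and certificate hash (the notice type and the ``c`` and ``cert_hash`` variables are hypothetical)::

    NOTICE([$note=SSL::Certificate_Expires_Soon,
            $msg="certificate expires soon",
            $conn=c,
            $identifier=cat(c$id$resp_h, c$id$resp_p, cert_hash)]);
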
## Ignored notice types.
const ignored_types: set[Notice::Type] = {} &redef;
## Emailed notice types.
@ -145,10 +155,10 @@ export {
const alarmed_types: set[Notice::Type] = {} &redef;
## Types that should be suppressed for the default suppression interval.
const not_suppressed_types: set[Notice::Type] = {} &redef;
## This table can be used as a shorthand way to modify suppression
## intervals for entire notice types.
const type_suppression_intervals: table[Notice::Type] of interval = {} &redef;
## This is the record that defines the items that make up the notice policy.
type PolicyItem: record {
## This is the exact positional order in which the
@ -160,13 +170,13 @@ export {
priority: count &log &default=5;
## An action given to the notice if the predicate returns true.
action: Notice::Action &log &default=ACTION_NONE;
## The pred (predicate) field is a function that returns a boolean T
## or F value. If the predicate function returns true, the action in
## this record is applied to the notice that is given as an argument
## to the predicate function. If no predicate is supplied, it's
## assumed that the PolicyItem always applies.
pred: function(n: Notice::Info): bool &log &optional;
## Indicates this item should terminate policy processing if the
## predicate returns T.
halt: bool &log &default=F;
## This defines the length of time that this particular notice should
@ -188,33 +198,35 @@ export {
[$pred(n: Notice::Info) = { return (n$note in Notice::emailed_types); },
$action = ACTION_EMAIL,
$priority = 8],
[$pred(n: Notice::Info) = {
if (n$note in Notice::type_suppression_intervals)
{
n$suppress_for=Notice::type_suppression_intervals[n$note];
return T;
}
return F;
},
$action = ACTION_NONE,
$priority = 8],
[$action = ACTION_LOG,
$priority = 0],
} &redef;
## Local system sendmail program.
const sendmail = "/usr/sbin/sendmail" &redef;
## Email address to send notices with the :bro:enum:`Notice::ACTION_EMAIL`
## action or to send bulk alarm logs on rotation with
## :bro:enum:`Notice::ACTION_ALARM`.
const mail_dest = "" &redef;
## Address that emails will be from.
const mail_from = "Big Brother <bro@localhost>" &redef;
## Reply-to address used in outbound email.
const reply_to = "" &redef;
## Text string prefixed to the subject of all emails sent out.
const mail_subject_prefix = "[Bro]" &redef;
## The maximum amount of time a plugin can delay email from being sent.
const max_email_delay = 15secs &redef;
## A log postprocessing function that implements emailing the contents
## of a log upon rotation to any configured :bro:id:`Notice::mail_dest`.
@ -225,8 +237,8 @@ export {
## Returns: True.
global log_mailing_postprocessor: function(info: Log::RotationInfo): bool;
## This is the event that is called as the entry point to the
## notice framework by the global :bro:id:`NOTICE` function. By the time
## this event is generated, default values have already been filled out in
## the :bro:type:`Notice::Info` record and synchronous functions in the
## :bro:id:`Notice::sync_functions` have already been called. The notice
@ -235,19 +247,19 @@ export {
## n: The record containing notice data.
global notice: event(n: Info);
## This is a set of functions that provide a synchronous way for scripts
## extending the notice framework to run before the normal event based
## notice pathway that most of the notice framework takes. This is helpful
## in cases where an action against a notice needs to happen immediately
## and can't wait the short time for the event to bubble up to the top of
## the event queue. An example is the IP address dropping script that
## can block IP addresses that have notices generated because it
## needs to operate closer to real time than the event queue allows it to.
## Normally the event based extension model using the
## :bro:id:`Notice::notice` event will work fine if there aren't harder
## real time constraints.
const sync_functions: set[function(n: Notice::Info)] = set() &redef;
## This event is generated when a notice begins to be suppressed.
##
## n: The record containing notice data regarding the notice type
@ -265,7 +277,7 @@ export {
## n: The record containing notice data regarding the notice type
## that was being suppressed.
global end_suppression: event(n: Notice::Info);
## Call this function to send a notice in an email. It is already used
## by default with the built in :bro:enum:`Notice::ACTION_EMAIL` and
## :bro:enum:`Notice::ACTION_PAGE` actions.
@ -307,22 +319,22 @@ function per_notice_suppression_interval(t: table[Notice::Type, string] of Notic
local n: Notice::Type;
local s: string;
[n,s] = idx;
local suppress_time = t[n,s]$suppress_for - (network_time() - t[n,s]$ts);
if ( suppress_time < 0secs )
suppress_time = 0secs;
# If there is no more suppression time left, the notice needs to be sent
# to the end_suppression event.
if ( suppress_time == 0secs )
event Notice::end_suppression(t[n,s]);
return suppress_time;
}
# This is the internally maintained notice suppression table. It's
# indexed on the Notice::Type and the $identifier field from the notice.
global suppressing: table[Type, string] of Notice::Info = {}
&create_expire=0secs
&expire_func=per_notice_suppression_interval;
@ -349,7 +361,7 @@ function log_mailing_postprocessor(info: Log::RotationInfo): bool
event bro_init() &priority=5
{
Log::create_stream(Notice::LOG, [$columns=Info, $ev=log_notice]);
Log::create_stream(Notice::ALARM_LOG, [$columns=Notice::Info]);
# If Bro is configured for mailing notices, set up mailing for alarms.
# Make sure that this alarm log is also output as text so that it can
@ -390,25 +402,49 @@ function email_headers(subject_desc: string, dest: string): string
return header_text;
}
event delay_sending_email(n: Notice::Info, dest: string, extend: bool)
{
email_notice_to(n, dest, extend);
}
function email_notice_to(n: Notice::Info, dest: string, extend: bool)
{
if ( reading_traces() || dest == "" )
return;
if ( extend )
{
if ( |n$email_delay_tokens| > 0 )
{
# If we are still within the max_email_delay, keep delaying.
if ( n$ts + max_email_delay > network_time() )
{
schedule 1sec { delay_sending_email(n, dest, extend) };
return;
}
else
{
event reporter_info(network_time(),
fmt("Notice email delay tokens weren't released in time (%s).", n$email_delay_tokens),
"");
}
}
}
local email_text = email_headers(fmt("%s", n$note), dest);
# First off, finish the headers and include the human readable messages
# then leave a blank line after the message.
email_text = string_cat(email_text, "\nMessage: ", n$msg);
if ( n?$sub )
email_text = string_cat(email_text, "\nSub-message: ", n$sub);
email_text = string_cat(email_text, "\n\n");
# Next, add information about the connection if it exists.
if ( n?$id )
{
email_text = string_cat(email_text, "Connection: ",
fmt("%s", n$id$orig_h), ":", fmt("%d", n$id$orig_p), " -> ",
fmt("%s", n$id$resp_h), ":", fmt("%d", n$id$resp_p), "\n");
if ( n?$uid )
@ -416,17 +452,18 @@ function email_notice_to(n: Notice::Info, dest: string, extend: bool)
}
else if ( n?$src )
email_text = string_cat(email_text, "Address: ", fmt("%s", n$src), "\n");
# Add the extended information if it's requested.
if ( extend )
{
email_text = string_cat(email_text, "\nEmail Extensions\n");
email_text = string_cat(email_text, "----------------\n");
for ( i in n$email_body_sections )
{
email_text = string_cat(email_text, "******************\n");
email_text = string_cat(email_text, n$email_body_sections[i], "\n");
}
}
email_text = string_cat(email_text, "\n\n--\n[Automatically generated]\n\n");
piped_exec(fmt("%s -t -oi", sendmail), email_text);
}
@ -439,10 +476,10 @@ event notice(n: Notice::Info) &priority=-5
Log::write(Notice::LOG, n);
if ( ACTION_ALARM in n$actions )
Log::write(Notice::ALARM_LOG, n);
# Normally suppress further notices like this one unless directed not to.
# n$identifier *must* be specified for suppression to function at all.
if ( n?$identifier &&
ACTION_NO_SUPPRESS !in n$actions &&
[n$note, n$identifier] !in suppressing &&
n$suppress_for != 0secs )
@ -465,7 +502,7 @@ function is_being_suppressed(n: Notice::Info): bool
else
return F;
}
# Executes a script with all of the notice fields put into the
# new process' environment as "BRO_ARG_<field>" variables.
function execute_with_notice(cmd: string, n: Notice::Info)
@ -474,9 +511,9 @@ function execute_with_notice(cmd: string, n: Notice::Info)
#local tgs = tags(n);
#system_env(cmd, tags);
}
# This is run synchronously as a function before all of the other
# notice related functions and events. It also modifies the
# :bro:type:`Notice::Info` record in place.
function apply_policy(n: Notice::Info)
{
@ -491,7 +528,7 @@ function apply_policy(n: Notice::Info)
if ( ! n?$uid )
n$uid = n$conn$uid;
}
if ( n?$id )
{
if ( ! n?$src )
@ -502,8 +539,12 @@ function apply_policy(n: Notice::Info)
n$p = n$id$resp_p;
}
if ( n?$p )
n$proto = get_port_transport_proto(n$p);
if ( n?$iconn )
{
n$proto = icmp;
if ( ! n?$src )
n$src = n$iconn$orig_h;
if ( ! n?$dst )
@ -513,15 +554,20 @@ function apply_policy(n: Notice::Info)
if ( ! n?$src_peer )
n$src_peer = get_event_peer();
if ( ! n?$peer_descr )
n$peer_descr = n$src_peer?$descr ?
n$src_peer$descr : fmt("%s", n$src_peer$host);
if ( ! n?$actions )
n$actions = set();
if ( ! n?$email_body_sections )
n$email_body_sections = vector();
if ( ! n?$email_delay_tokens )
n$email_delay_tokens = set();
if ( ! n?$policy_items )
n$policy_items = set();
for ( i in ordered_policy )
{
# If there's no predicate or the predicate returns F.
@ -529,51 +575,51 @@ function apply_policy(n: Notice::Info)
{
add n$actions[ordered_policy[i]$action];
add n$policy_items[int_to_count(i)];
# If the predicate matched and there was a suppression interval,
# apply it to the notice now.
if ( ordered_policy[i]?$suppress_for )
n$suppress_for = ordered_policy[i]$suppress_for;
# If the policy item wants to halt policy processing, do it now!
if ( ordered_policy[i]$halt )
break;
}
}
# Apply the suppression time after applying the policy so that policy
# items can give custom suppression intervals. If there is no
# suppression interval given yet, the default is applied.
if ( ! n?$suppress_for )
n$suppress_for = default_suppression_interval;
# Delete the connection record if it's there so we aren't sending that
# to remote machines. It can cause problems due to the size of the
# connection record.
if ( n?$conn )
delete n$conn;
if ( n?$iconn )
delete n$iconn;
}
# Create the ordered notice policy automatically which will be used at runtime
# for prioritized matching of the notice policy.
event bro_init() &priority=10
{
# Create the policy log here because it's only written to in this handler.
Log::create_stream(Notice::POLICY_LOG, [$columns=PolicyItem]);
local tmp: table[count] of set[PolicyItem] = table();
for ( pi in policy )
{
if ( pi$priority < 0 || pi$priority > 10 )
Reporter::fatal("All Notice::PolicyItem priorities must be within 0 and 10");
if ( pi$priority !in tmp )
tmp[pi$priority] = set();
add tmp[pi$priority][pi];
}
local rev_count = vector(10,9,8,7,6,5,4,3,2,1,0);
for ( i in rev_count )
{
@ -589,7 +635,7 @@ event bro_init() &priority=10
}
}
}
function internal_NOTICE(n: Notice::Info)
{
# Suppress this notice if necessary.

View file

@ -1,5 +1,5 @@
##! This script provides the framework for software version detection and
##! parsing, but doesn't actually do any detection on it's own. It relys on
##! parsing but doesn't actually do any detection on its own. It relies on
##! other protocol specific scripts to parse out software from the protocols
##! that they analyze. The entry point for providing new software detections
##! to this framework is through the :bro:id:`Software::found` function.
@ -10,39 +10,44 @@
module Software;
export {
## The software logging stream identifier.
redef enum Log::ID += { LOG };
## Scripts detecting new types of software need to redef this enum to add
## their own specific software types which would then be used when they
## create :bro:type:`Software::Info` records.
type Type: enum {
## A placeholder type for when the type of software is not known.
UNKNOWN,
OPERATING_SYSTEM,
DATABASE_SERVER,
# There are a number of ways to detect printers on the
# network; we just need to codify them in a script and move
# this out of here. It isn't currently used for anything.
PRINTER,
};
## A structure to represent the numeric version of software.
type Version: record {
major: count &optional; ##< Major version number
minor: count &optional; ##< Minor version number
minor2: count &optional; ##< Minor subversion number
addl: string &optional; ##< Additional version string (e.g. "beta42")
## Major version number
major: count &optional;
## Minor version number
minor: count &optional;
## Minor subversion number
minor2: count &optional;
## Additional version string (e.g. "beta42")
addl: string &optional;
} &log;
## The record type that is used for representing and logging software.
type Info: record {
## The time at which the software was first detected.
## The time at which the software was detected.
ts: time &log;
## The IP address detected running the software.
host: addr &log;
## The type of software detected (e.g. WEB_SERVER)
## The type of software detected (e.g. :bro:enum:`HTTP::SERVER`).
software_type: Type &log &default=UNKNOWN;
## Name of the software (e.g. Apache)
## Name of the software (e.g. Apache).
name: string &log;
## Version of the software
## Version of the software.
version: Version &log;
## The full unparsed version string found because the version parsing
## doesn't work 100% reliably and this acts as a fall back in the logs.
## doesn't always work reliably, and this acts as a
## fallback in the logs.
unparsed_version: string &log &optional;
## This can indicate that this software being detected should
@ -55,37 +60,48 @@ export {
force_log: bool &default=F;
};
## The hosts whose software should be detected and tracked.
## Hosts whose software should be detected and tracked.
## Choices are: LOCAL_HOSTS, REMOTE_HOSTS, ALL_HOSTS, NO_HOSTS
const asset_tracking = LOCAL_HOSTS &redef;
## Other scripts should call this function when they detect software.
##
## id: The connection id where the software was discovered.
##
## info: A record representing the software discovered.
##
## Returns: T if the software was logged, F otherwise.
global found: function(id: conn_id, info: Software::Info): bool;
## This function can take many software version strings and parse them
## Take many common software version strings and parse them
## into a sensible :bro:type:`Software::Version` record. There are
## still many cases where scripts may have to have their own specific
## version parsing though.
##
## unparsed_version: The raw version string.
##
## host: The host where the software was discovered.
##
## software_type: The type of software.
##
## Returns: A complete record ready for the :bro:id:`Software::found` function.
global parse: function(unparsed_version: string,
host: addr,
software_type: Type): Info;
## Compare two versions.
## Compare two version records.
##
## Returns: -1 for v1 < v2, 0 for v1 == v2, 1 for v1 > v2.
## If the numerical version numbers match, the addl string
## is compared lexicographically.
global cmp_versions: function(v1: Version, v2: Version): int;
## This type represents a set of software. It's used by the
## :bro:id:`tracked` variable to store all known pieces of software
## for a particular host. It's indexed with the name of a piece of
## software such as "Firefox" and it yields a
## :bro:type:`Software::Info` record with more information about the
## software.
## Type to represent a collection of :bro:type:`Software::Info` records.
## It's indexed with the name of a piece of software such as "Firefox"
## and it yields a :bro:type:`Software::Info` record with more information
## about the software.
type SoftwareSet: table[string] of Info;
## The set of software associated with an address. Data expires from
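
For illustration, the version comparison documented above might be exercised like this (the version values are invented)::

    event bro_init()
        {
        local v1: Software::Version = [$major=2, $minor=2, $minor2=21];
        local v2: Software::Version = [$major=2, $minor=4, $minor2=0];
        if ( Software::cmp_versions(v1, v2) < 0 )
            print "2.2.21 sorts before 2.4.0";
        }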

File diff suppressed because it is too large.

View file

@ -1,23 +1,27 @@
##! This script can be used to extract either the originator's data or the
##! responders data or both. By default nothing is extracted, and in order
##! to actually extract data the ``c$extract_orig`` and/or the
##! ``c$extract_resp`` variable must be set to T. One way to achieve this
##! would be to handle the connection_established event elsewhere and set the
##! extract_orig and extract_resp options there. However, there may be trouble
##! with the timing due the event queue delay.
##! This script does not work well in a cluster context unless it has a
##! remotely mounted disk to write the content files to.
##! ``c$extract_resp`` variable must be set to ``T``. One way to achieve this
##! would be to handle the :bro:id:`connection_established` event elsewhere
##! and set the ``extract_orig`` and ``extract_resp`` options there.
##! However, there may be trouble with the timing due to event queue delay.
##!
##! .. note::
##!
##! This script does not work well in a cluster context unless it has a
##! remotely mounted disk to write the content files to.
@load base/utils/files
module Conn;
export {
## The prefix given to files as they are opened on disk.
## The prefix given to files containing extracted connections as they are
## opened on disk.
const extraction_prefix = "contents" &redef;
## If this variable is set to T, then all contents of all files will be
## extracted.
## If this variable is set to ``T``, then all contents of all connections
## will be extracted.
const default_extract = F &redef;
}
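
One way to opt in per connection, as the header describes, is to set the flags this script adds to the connection record from a :bro:id:`connection_established` handler (the port test is hypothetical)::

    event connection_established(c: connection)
        {
        if ( c$id$resp_p == 80/tcp )
            c$extract_resp = T;
        }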

View file

@ -4,7 +4,7 @@
module Conn;
export {
## Define inactivty timeouts by the service detected being used over
## Define inactivity timeouts by the service detected being used over
## the connection.
const analyzer_inactivity_timeouts: table[AnalyzerTag] of interval = {
# For interactive services, allow longer periods of inactivity.

View file

@ -1,17 +1,33 @@
##! This script manages the tracking/logging of general information regarding
##! TCP, UDP, and ICMP traffic. For UDP and ICMP, "connections" are to
##! be interpreted using flow semantics (sequence of packets from a source
##! host/port to a destination host/port). Further, ICMP "ports" are to
##! be interpreted with the source port meaning the ICMP message type and
##! the destination port meaning the ICMP message code.
@load base/utils/site
module Conn;
export {
## The connection logging stream identifier.
redef enum Log::ID += { LOG };
## The record type which contains column fields of the connection log.
type Info: record {
## This is the time of the first packet.
ts: time &log;
## A unique identifier of a connection.
uid: string &log;
## The connection's 4-tuple of endpoint addresses/ports.
id: conn_id &log;
## The transport layer protocol of the connection.
proto: transport_proto &log;
## An identification of an application protocol being sent over the
## connection.
service: string &log &optional;
## How long the connection lasted. For 3-way or 4-way connection
## tear-downs, this will not include the final ACK.
duration: interval &log &optional;
## The number of payload bytes the originator sent. For TCP
## this is taken from sequence numbers and might be inaccurate
@ -51,8 +67,8 @@ export {
## have been completed prior to the packet loss.
missed_bytes: count &log &default=0;
## Records the state history of (TCP) connections as
## a string of letters.
## Records the state history of connections as a string of letters.
## For TCP connections the meaning of those letters is:
##
## ====== ====================================================
## Letter Meaning
@ -71,7 +87,8 @@ export {
## originator and lower case then means the responder.
## Also, there is compression. We only record one "d" in each direction,
## for instance. I.e., we just record that data went in that direction.
## This history is not meant to encode how much data that happened to be.
## This history is not meant to encode how much data that happened to
## be.
history: string &log &optional;
## Number of packets the originator sent.
## Only set if :bro:id:`use_conn_size_analyzer` = T
@ -85,7 +102,9 @@ export {
## Number of IP level bytes the responder sent. See ``orig_pkts``.
resp_ip_bytes: count &log &optional;
};
## Event that can be handled to access the :bro:type:`Conn::Info`
## record as it is sent on to the logging framework.
global log_conn: event(rec: Info);
}
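# A sketch of consuming this event from another script (not part of this
# commit): print long-lived connections as their records are logged. The
# one hour threshold is illustrative only.
event Conn::log_conn(rec: Conn::Info)
	{
	if ( rec?$duration && rec$duration > 1 hr )
		print fmt("long-lived connection %s (%s)", rec$uid, rec$duration);
	}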

View file

@ -4,9 +4,9 @@
module DNS;
export {
const PTR = 12; ##< RR TYPE value for a domain name pointer.
const EDNS = 41; ##< An OPT RR TYPE value described by EDNS.
const ANY = 255; ##< A QTYPE value describing a request for all records.
## Mapping of DNS query type codes to human readable string representation.
const query_types = {
@ -29,50 +29,43 @@ export {
[ANY] = "*",
} &default = function(n: count): string { return fmt("query-%d", n); };
const code_types = {
[0] = "X0",
[1] = "Xfmt",
[2] = "Xsrv",
[3] = "Xnam",
[4] = "Ximp",
[5] = "X[",
} &default="?";
## Errors used for non-TSIG/EDNS types.
const base_errors = {
[0] = "NOERROR", ##< No Error
[1] = "FORMERR", ##< Format Error
[2] = "SERVFAIL", ##< Server Failure
[3] = "NXDOMAIN", ##< Non-Existent Domain
[4] = "NOTIMP", ##< Not Implemented
[5] = "REFUSED", ##< Query Refused
[6] = "YXDOMAIN", ##< Name Exists when it should not
[7] = "YXRRSET", ##< RR Set Exists when it should not
[8] = "NXRRSet", ##< RR Set that should exist does not
[9] = "NOTAUTH", ##< Server Not Authoritative for zone
[10] = "NOTZONE", ##< Name not contained in zone
[11] = "unassigned-11", ##< available for assignment
[12] = "unassigned-12", ##< available for assignment
[13] = "unassigned-13", ##< available for assignment
[14] = "unassigned-14", ##< available for assignment
[15] = "unassigned-15", ##< available for assignment
[16] = "BADVERS", ##< for EDNS, collision w/ TSIG
[17] = "BADKEY", ##< Key not recognized
[18] = "BADTIME", ##< Signature out of time window
[19] = "BADMODE", ##< Bad TKEY Mode
[20] = "BADNAME", ##< Duplicate key name
[21] = "BADALG", ##< Algorithm not supported
[22] = "BADTRUNC", ##< draft-ietf-dnsext-tsig-sha-05.txt
[3842] = "BADSIG", ##< 16 <= number collision with EDNS(16);
##< this is a translation from TSIG(16)
[0] = "NOERROR", # No Error
[1] = "FORMERR", # Format Error
[2] = "SERVFAIL", # Server Failure
[3] = "NXDOMAIN", # Non-Existent Domain
[4] = "NOTIMP", # Not Implemented
[5] = "REFUSED", # Query Refused
[6] = "YXDOMAIN", # Name Exists when it should not
[7] = "YXRRSET", # RR Set Exists when it should not
[8] = "NXRRSet", # RR Set that should exist does not
[9] = "NOTAUTH", # Server Not Authoritative for zone
[10] = "NOTZONE", # Name not contained in zone
[11] = "unassigned-11", # available for assignment
[12] = "unassigned-12", # available for assignment
[13] = "unassigned-13", # available for assignment
[14] = "unassigned-14", # available for assignment
[15] = "unassigned-15", # available for assignment
[16] = "BADVERS", # for EDNS, collision w/ TSIG
[17] = "BADKEY", # Key not recognized
[18] = "BADTIME", # Signature out of time window
[19] = "BADMODE", # Bad TKEY Mode
[20] = "BADNAME", # Duplicate key name
[21] = "BADALG", # Algorithm not supported
[22] = "BADTRUNC", # draft-ietf-dnsext-tsig-sha-05.txt
[3842] = "BADSIG", # 16 <= number collision with EDNS(16);
# this is a translation from TSIG(16)
} &default = function(n: count): string { return fmt("rcode-%d", n); };
## This deciphers EDNS Z field values.
const edns_zfield = {
[0] = "NOVALUE", # regular entry
[32768] = "DNS_SEC_OK", # accepts DNS Sec RRs
} &default="?";
## Possible values of the CLASS field in resource records or QCLASS field
## in query messages.
const classes = {
[1] = "C_INTERNET",
[2] = "C_CSNET",
@ -81,4 +74,4 @@ export {
[254] = "C_NONE",
[255] = "C_ANY",
} &default = function(n: count): string { return fmt("qclass-%d", n); };
}

View file

@ -1,54 +1,106 @@
##! Base DNS analysis script which tracks and logs DNS queries along with
##! their responses.
@load ./consts
module DNS;
export {
## The DNS logging stream identifier.
redef enum Log::ID += { LOG };
## The record type which contains the column fields of the DNS log.
type Info: record {
## The earliest time at which a DNS protocol message over the
## associated connection is observed.
ts: time &log;
## A unique identifier of the connection over which DNS messages
## are being transferred.
uid: string &log;
## The connection's 4-tuple of endpoint addresses/ports.
id: conn_id &log;
## The transport layer protocol of the connection.
proto: transport_proto &log;
## A 16 bit identifier assigned by the program that generated the
## DNS query. Also used in responses to match up replies to
## outstanding queries.
trans_id: count &log &optional;
## The domain name that is the subject of the DNS query.
query: string &log &optional;
## The QCLASS value specifying the class of the query.
qclass: count &log &optional;
## A descriptive name for the class of the query.
qclass_name: string &log &optional;
## A QTYPE value specifying the type of the query.
qtype: count &log &optional;
## A descriptive name for the type of the query.
qtype_name: string &log &optional;
## The response code value in DNS response messages.
rcode: count &log &optional;
## A descriptive name for the response code value.
rcode_name: string &log &optional;
## Whether the message is a query (F) or response (T).
QR: bool &log &default=F;
## The Authoritative Answer bit for response messages specifies that
## the responding name server is an authority for the domain name
## in the question section.
AA: bool &log &default=F;
## The Truncation bit specifies that the message was truncated.
TC: bool &log &default=F;
## The Recursion Desired bit indicates that a name server should
## recursively pursue the query.
RD: bool &log &default=F;
## The Recursion Available bit in a response message indicates if
## the name server supports recursive queries.
RA: bool &log &default=F;
## A reserved field that is currently supposed to be zero in all
## queries and responses.
Z: count &log &default=0;
## The set of resource descriptions in answer of the query.
answers: vector of string &log &optional;
## The caching intervals of the associated RRs described by the
## ``answers`` field.
TTLs: vector of interval &log &optional;
## This value indicates if this request/response pair is ready to be
## logged.
ready: bool &default=F;
## The total number of resource records in a reply message's answer
## section.
total_answers: count &optional;
## The total number of resource records in a reply message's answer,
## authority, and additional sections.
total_replies: count &optional;
};
## A record type which tracks the status of DNS queries for a given
## :bro:type:`connection`.
type State: record {
## Indexed by query id, returns Info record corresponding to
## query/response which haven't completed yet.
pending: table[count] of Info &optional;
## This is the list of DNS responses that have completed based on the
## number of responses declared and the number received. The contents
## of the set are transaction IDs.
finished_answers: set[count] &optional;
};
## An event that can be handled to access the :bro:type:`DNS::Info`
## record as it is sent to the logging framework.
global log_dns: event(rec: Info);
## This is called by the specific dns_*_reply events with a "reply" which
## may not represent the full data available from the resource record, but
## it's generally considered a summarization of the response(s).
##
## c: The connection record for which to fill in DNS reply data.
##
## msg: The DNS message header information for the response.
##
## ans: The general information of a RR response.
##
## reply: The specific response information according to RR type/class.
global do_reply: event(c: connection, msg: dns_msg, ans: dns_answer, reply: string);
}
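# A sketch of handling this event from a custom script (192.0.2.1 is an
# illustrative TEST-NET address, not a real indicator): flag answers
# resolving to one particular address.
event DNS::do_reply(c: connection, msg: dns_msg, ans: dns_answer, reply: string)
	{
	if ( reply == "192.0.2.1" )
		print fmt("suspicious answer for %s from %s", ans$query, c$id$resp_h);
	}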
@ -58,11 +110,11 @@ redef record connection += {
};
# DPD configuration.
redef capture_filters += {
["dns"] = "port 53",
["mdns"] = "udp and port 5353",
["llmns"] = "udp and port 5355",
["netbios-ns"] = "udp port 137",
["netbios-ns"] = "udp port 137",
};
const dns_ports = { 53/udp, 53/tcp, 137/udp, 5353/udp, 5355/udp };
@ -89,7 +141,7 @@ function new_session(c: connection, trans_id: count): Info
state$finished_answers=set();
c$dns_state = state;
}
local info: Info;
info$ts = network_time();
info$id = c$id;
@ -102,23 +154,29 @@ function new_session(c: connection, trans_id: count): Info
function set_session(c: connection, msg: dns_msg, is_query: bool)
{
if ( ! c?$dns_state || msg$id !in c$dns_state$pending )
{
c$dns_state$pending[msg$id] = new_session(c, msg$id);
# Try deleting this transaction id from the set of finished answers.
# Sometimes hosts will reuse ports and transaction ids and this should
# be considered to be a legit scenario (although bad practice).
delete c$dns_state$finished_answers[msg$id];
}
c$dns = c$dns_state$pending[msg$id];
c$dns$rcode = msg$rcode;
c$dns$rcode_name = base_errors[msg$rcode];
if ( ! is_query )
{
if ( ! c$dns?$total_answers )
c$dns$total_answers = msg$num_answers;
if ( c$dns?$total_replies &&
c$dns$total_replies != msg$num_answers + msg$num_addl + msg$num_auth )
{
event conn_weird("dns_changed_number_of_responses", c,
fmt("The declared number of responses changed from %d to %d",
c$dns$total_replies,
msg$num_answers + msg$num_addl + msg$num_auth));
}
@ -129,27 +187,30 @@ function set_session(c: connection, msg: dns_msg, is_query: bool)
}
}
}
event DNS::do_reply(c: connection, msg: dns_msg, ans: dns_answer, reply: string) &priority=5
{
set_session(c, msg, F);
if ( ans$answer_type == DNS_ANS )
{
c$dns$AA = msg$AA;
c$dns$RA = msg$RA;
if ( msg$id in c$dns_state$finished_answers )
event conn_weird("dns_reply_seen_after_done", c, "");
if ( reply != "" )
{
if ( ! c$dns?$answers )
c$dns$answers = vector();
c$dns$answers[|c$dns$answers|] = reply;
if ( ! c$dns?$TTLs )
c$dns$TTLs = vector();
c$dns$TTLs[|c$dns$TTLs|] = ans$TTL;
}
if ( c$dns?$answers && |c$dns$answers| == c$dns$total_answers )
{
add c$dns_state$finished_answers[c$dns$trans_id];
@ -158,13 +219,12 @@ event DNS::do_reply(c: connection, msg: dns_msg, ans: dns_answer, reply: string)
}
}
}
event DNS::do_reply(c: connection, msg: dns_msg, ans: dns_answer, reply: string) &priority=-5
{
if ( c$dns$ready )
{
Log::write(DNS::LOG, c$dns);
add c$dns_state$finished_answers[c$dns$trans_id];
# This record is logged and no longer pending.
delete c$dns_state$pending[c$dns$trans_id];
}
@ -173,41 +233,41 @@ event DNS::do_reply(c: connection, msg: dns_msg, ans: dns_answer, reply: string)
event dns_request(c: connection, msg: dns_msg, query: string, qtype: count, qclass: count) &priority=5
{
set_session(c, msg, T);
c$dns$RD = msg$RD;
c$dns$TC = msg$TC;
c$dns$qclass = qclass;
c$dns$qclass_name = classes[qclass];
c$dns$qtype = qtype;
c$dns$qtype_name = query_types[qtype];
# Decode netbios name queries
# Note: I'm ignoring the name type for now. Not sure if this should be
# worked into the query/response in some fashion.
if ( c$id$resp_p == 137/udp )
query = decode_netbios_name(query);
c$dns$query = query;
c$dns$Z = msg$Z;
}
event dns_A_reply(c: connection, msg: dns_msg, ans: dns_answer, a: addr) &priority=5
{
event DNS::do_reply(c, msg, ans, fmt("%s", a));
}
event dns_TXT_reply(c: connection, msg: dns_msg, ans: dns_answer, str: string) &priority=5
{
event DNS::do_reply(c, msg, ans, str);
}
event dns_AAAA_reply(c: connection, msg: dns_msg, ans: dns_answer, a: addr,
astr: string) &priority=5
{
# TODO: What should we do with astr?
event DNS::do_reply(c, msg, ans, fmt("%s", a));
}
event dns_NS_reply(c: connection, msg: dns_msg, ans: dns_answer, name: string) &priority=5
{
event DNS::do_reply(c, msg, ans, name);
@ -223,12 +283,12 @@ event dns_MX_reply(c: connection, msg: dns_msg, ans: dns_answer, name: string,
{
event DNS::do_reply(c, msg, ans, name);
}
event dns_PTR_reply(c: connection, msg: dns_msg, ans: dns_answer, name: string) &priority=5
{
event DNS::do_reply(c, msg, ans, name);
}
event dns_SOA_reply(c: connection, msg: dns_msg, ans: dns_answer, soa: dns_soa) &priority=5
{
event DNS::do_reply(c, msg, ans, soa$mname);
@ -238,7 +298,7 @@ event dns_WKS_reply(c: connection, msg: dns_msg, ans: dns_answer) &priority=5
{
event DNS::do_reply(c, msg, ans, "");
}
event dns_SRV_reply(c: connection, msg: dns_msg, ans: dns_answer) &priority=5
{
event DNS::do_reply(c, msg, ans, "");
@ -247,17 +307,17 @@ event dns_SRV_reply(c: connection, msg: dns_msg, ans: dns_answer) &priority=5
# TODO: figure out how to handle these
#event dns_EDNS(c: connection, msg: dns_msg, ans: dns_answer)
# {
#
# }
#
#event dns_EDNS_addl(c: connection, msg: dns_msg, ans: dns_edns_additional)
# {
#
# }
#
#event dns_TSIG_addl(c: connection, msg: dns_msg, ans: dns_tsig_additional)
# {
#
# }
@ -271,10 +331,10 @@ event connection_state_remove(c: connection) &priority=-5
{
if ( ! c?$dns_state )
return;
# If Bro is expiring state, we should go ahead and log all unlogged
# request/response pairs now.
for ( trans_id in c$dns_state$pending )
Log::write(DNS::LOG, c$dns_state$pending[trans_id]);
}

View file

@ -1,4 +1,4 @@
##! File extraction support for FTP.
@load ./main
@load base/utils/files
@ -6,7 +6,7 @@
module FTP;
export {
## Pattern of file mime types to extract from FTP transfers.
const extract_file_types = /NO_DEFAULT/ &redef;
## The on-disk prefix for files to be extracted from FTP-data transfers.
@ -14,10 +14,15 @@ export {
}
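# An illustrative usage sketch for a site script (the mime-type pattern
# is an example, not a recommended default): extract Windows executables
# and ZIP archives transferred over FTP.
redef FTP::extract_file_types = /application\/(x-dosexec|zip)/;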
redef record Info += {
## On-disk file where the transferred file was extracted to.
extraction_file: file &log &optional;
## Indicates if the current command/response pair should attempt to
## extract the file if a file was transferred.
extract_file: bool &default=F;
## Internal tracking of the total number of files extracted during this
## session.
num_extracted_files: count &default=0;
};
@ -33,7 +38,6 @@ event file_transferred(c: connection, prefix: string, descr: string,
if ( extract_file_types in s$mime_type )
{
s$extract_file = T;
add s$tags["extracted_file"];
++s$num_extracted_files;
}
}

View file

@ -1,11 +1,7 @@
##! The logging this script does is primarily focused on logging FTP commands
##! along with metadata. For example, if files are transferred, the argument
##! will take on the full path that the client is at along with the requested
##! file name.
@load ./utils-commands
@load base/utils/paths
@ -14,38 +10,64 @@
module FTP;
export {
## The FTP protocol logging stream identifier.
redef enum Log::ID += { LOG };
## List of commands that should have their command/response pairs logged.
const logged_commands = {
"APPE", "DELE", "RETR", "STOR", "STOU", "ACCT"
} &redef;
## This setting determines whether passwords used in FTP sessions are captured.
const default_capture_password = F &redef;
## User IDs that can be considered "anonymous".
const guest_ids = { "anonymous", "ftp", "guest" } &redef;
type Info: record {
## Time when the command was sent.
ts: time &log;
uid: string &log;
id: conn_id &log;
## User name for the current FTP session.
user: string &log &default="<unknown>";
## Password for the current FTP session if captured.
password: string &log &optional;
## Command given by the client.
command: string &log &optional;
## Argument for the command if one is given.
arg: string &log &optional;
## Libmagic "sniffed" file type if the command indicates a file transfer.
mime_type: string &log &optional;
## Libmagic "sniffed" file description if the command indicates a file transfer.
mime_desc: string &log &optional;
## Size of the file if the command indicates a file transfer.
file_size: count &log &optional;
## Reply code from the server in response to the command.
reply_code: count &log &optional;
## Reply message from the server in response to the command.
reply_msg: string &log &optional;
## Arbitrary tags that may indicate a particular attribute of this command.
tags: set[string] &log &default=set();
## Current working directory that this session is in. By making
## the default value '/.', we can indicate that unless something
## more concrete is discovered that the existing but unknown
## directory is ok to use.
cwd: string &default="/.";
## Command that is currently waiting for a response.
cmdarg: CmdArg &optional;
## Queue of commands that have been sent but not yet responded to.
pending_commands: PendingCmds;
## Indicates if the session is in active or passive mode.
passive: bool &default=F;
## Determines if the password will be captured for this request.
capture_password: bool &default=default_capture_password;
};
@ -56,22 +78,12 @@ export {
y: count;
z: count;
};
## Parse FTP reply codes into the three constituent single digit values.
global parse_ftp_reply_code: function(code: count): ReplyCode;
## Event that can be handled to access the :bro:type:`FTP::Info`
## record as it is sent on to the logging framework.
global log_ftp: event(rec: Info);
}
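# A sketch of using parse_ftp_reply_code: split reply code 227
# ("Entering Passive Mode") into its three constituent digits.
event bro_init()
	{
	local rc = FTP::parse_ftp_reply_code(227);
	print rc$x, rc$y, rc$z; # prints 2, 2, 7
	}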

View file

@ -2,14 +2,22 @@ module FTP;
export {
type CmdArg: record {
## Time when the command was sent.
ts: time;
## Command.
cmd: string &default="<unknown>";
## Argument for the command if one was given.
arg: string &default="";
## Counter to track how many commands have been executed.
seq: count &default=0;
};
## Structure for tracking pending commands in the event that the client
## sends a large number of commands before the server has a chance to
## reply.
type PendingCmds: table[count] of CmdArg;
## Possible response codes for a wide variety of FTP commands.
const cmd_reply_code: set[string, count] = {
# According to RFC 959
["<init>", [120, 220, 421]],

View file

@ -8,29 +8,24 @@
module HTTP;
export {
## Pattern of file mime types to extract from HTTP response entity bodies.
const extract_file_types = /NO_DEFAULT/ &redef;
## The on-disk prefix for files to be extracted from HTTP entity bodies.
const extraction_prefix = "http-item" &redef;
redef record Info += {
## On-disk file where the response body was extracted to.
extraction_file: file &log &optional;
};
redef record State += {
## Indicates if the response body is to be extracted or not. Must be
## set before or by the first :bro:id:`http_entity_data` event for the
## content.
extract_file: bool &default=F;
};
}
event http_entity_data(c: connection, is_orig: bool, length: count, data: string) &priority=-5
{
# Client body extraction is not currently supported in this script.
if ( is_orig )
@ -41,8 +36,12 @@ event http_entity_data(c: connection, is_orig: bool, length: count, data: string
if ( c$http?$mime_type &&
extract_file_types in c$http$mime_type )
{
c$http$extract_file = T;
}
if ( c$http$extract_file )
{
local suffix = fmt("%s_%d.dat", is_orig ? "orig" : "resp", c$http_state$current_response);
local fname = generate_extraction_filename(extraction_prefix, c, suffix);
c$http$extraction_file = open(fname);
@ -50,12 +49,12 @@ event http_entity_data(c: connection, is_orig: bool, length: count, data: string
}
}
if ( c$http?$extraction_file )
print c$http$extraction_file, data;
}
event http_end_entity(c: connection, is_orig: bool)
{
if ( c$http?$extraction_file )
close(c$http$extraction_file);
}

View file

@ -11,7 +11,8 @@ export {
};
redef record Info += {
## MD5 sum for a file transferred over HTTP calculated from the
## response body.
md5: string &log &optional;
## This value can be set per-transfer to determine per request
@ -19,8 +20,8 @@ export {
## set to T at the time of or before the first chunk of body data.
calc_md5: bool &default=F;
## Indicates if an MD5 sum is being calculated for the current
## request/response pair.
calculating_md5: bool &default=F;
};
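# A sketch: request MD5 calculation for response bodies served by one
# host of interest (the host name is hypothetical), before any body
# data arrives.
event http_begin_entity(c: connection, is_orig: bool)
	{
	if ( ! is_orig && c?$http && c$http?$host &&
	     c$http$host == "downloads.example.com" )
		c$http$calc_md5 = T;
	}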

View file

@ -1,5 +1,4 @@
##! Identification of file types in HTTP response bodies with file content sniffing.
@load base/frameworks/signatures
@load base/frameworks/notice
@ -15,30 +14,32 @@ module HTTP;
export {
redef enum Notice::Type += {
## Indicates when the file extension doesn't seem to match the file contents.
Incorrect_File_Type,
};
redef record Info += {
## Mime type of response body identified by content sniffing.
mime_type: string &log &optional;
## Indicates that no data of the current file transfer has been
## seen yet. After the first :bro:id:`http_entity_data` event, it
## will be set to F.
first_chunk: bool &default=T;
};
redef enum Tags += {
IDENTIFIED_FILE
};
## Mapping between mime types and regular expressions for URLs.
## The :bro:enum:`HTTP::Incorrect_File_Type` notice is generated if the pattern
## doesn't match the mime type that was discovered.
const mime_types_extensions: table[string] of pattern = {
["application/x-dosexec"] = /\.([eE][xX][eE]|[dD][lL][lL])/,
} &redef;
## A pattern for filtering out :bro:enum:`HTTP::Incorrect_File_Type` urls
## that are not noteworthy before a notice is created. Each
## pattern added should match the complete URL (the matched URLs include
## "http://" at the beginning).
const ignored_incorrect_file_type_urls = /^$/ &redef;
}
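# An illustrative usage sketch for a site script (the ZIP mapping and
# host name are hypothetical): also require sniffed ZIP downloads to
# come from URLs ending in .zip, and ignore mismatches on one host.
redef HTTP::mime_types_extensions += {
	["application/zip"] = /\.([zZ][iI][pP])/,
};
redef HTTP::ignored_incorrect_file_type_urls = /^http:\/\/files\.example\.com\/.*/;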
event signature_match(state: signature_state, msg: string, data: string) &priority=5
@ -59,6 +60,10 @@ event signature_match(state: signature_state, msg: string, data: string) &priori
c$http?$uri && mime_types_extensions[msg] !in c$http$uri )
{
local url = build_url_http(c$http);
if ( url == ignored_incorrect_file_type_urls )
return;
local message = fmt("%s %s %s", msg, c$http$method, url);
NOTICE([$note=Incorrect_File_Type,
$msg=message,

View file

@ -1,3 +1,7 @@
##! Implements base functionality for HTTP analysis. The logging model is
##! to log request/response pairs and all relevant metadata together in
##! a single record.
@load base/utils/numbers
@load base/utils/files
@ -8,6 +12,7 @@ export {
## Indicate a type of attack or compromise in the record to be logged.
type Tags: enum {
## Placeholder.
EMPTY
};
@ -15,64 +20,69 @@ export {
const default_capture_password = F &redef;
type Info: record {
## Timestamp for when the request happened.
ts: time &log;
uid: string &log;
id: conn_id &log;
## Represents the pipelined depth into the connection of this
## request/response transaction.
trans_depth: count &log;
## Verb used in the HTTP request (GET, POST, HEAD, etc.).
method: string &log &optional;
## Value of the HOST header.
host: string &log &optional;
## URI used in the request.
uri: string &log &optional;
## Value of the "referer" header. The comment is deliberately
## misspelled like the standard declares, but the name used here is
## "referrer" spelled correctly.
referrer: string &log &optional;
## Value of the User-Agent header from the client.
user_agent: string &log &optional;
## Actual uncompressed content size of the data transferred from
## the client.
request_body_len: count &log &default=0;
## Actual uncompressed content size of the data transferred from
## the server.
response_body_len: count &log &default=0;
## Status code returned by the server.
status_code: count &log &optional;
## Status message returned by the server.
status_msg: string &log &optional;
## Last seen 1xx informational reply code returned by the server.
info_code: count &log &optional;
## Last seen 1xx informational reply message returned by the server.
info_msg: string &log &optional;
## Filename given in the Content-Disposition header sent by the server.
filename: string &log &optional;
## A set of indicators of various attributes discovered and
## related to a particular request/response pair.
tags: set[Tags] &log;
## Username if basic-auth is performed for the request.
username: string &log &optional;
## Password if basic-auth is performed for the request.
password: string &log &optional;
## Determines if the password will be captured for this request.
capture_password: bool &default=default_capture_password;
## All of the headers that may indicate if the request was proxied.
proxied: set[string] &log &optional;
};
## Structure to maintain state for an HTTP connection with multiple
## requests and responses.
type State: record {
## Pending requests.
pending: table[count] of Info;
## Current request in the pending queue.
current_request: count &default=0;
## Current response in the pending queue.
current_response: count &default=0;
};
## A list of HTTP headers typically used to indicate proxied requests.
const proxy_headers: set[string] = {
"FORWARDED",
"X-FORWARDED-FOR",
@ -83,6 +93,8 @@ export {
"PROXY-CONNECTION",
} &redef;
## Event that can be handled to access the HTTP record as it is sent on
## to the logging framework.
global log_http: event(rec: Info);
}
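# A sketch of consuming this event from another script: count server
# errors per responder address as records are logged (the table name is
# arbitrary).
global http_errors: table[addr] of count = {};

event HTTP::log_http(rec: HTTP::Info)
	{
	if ( rec?$status_code && rec$status_code >= 500 )
		{
		if ( rec$id$resp_h !in http_errors )
			http_errors[rec$id$resp_h] = 0;
		++http_errors[rec$id$resp_h];
		}
	}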

View file

@ -5,8 +5,31 @@
module HTTP;
export {
## Given a string containing a series of key-value pairs separated by "=",
## this function can be used to parse out all of the key names.
##
## data: The raw data, such as a URL or cookie value.
##
## kv_splitter: A regular expression representing the separator between
## key-value pairs.
##
## Returns: A vector of strings containing the keys.
global extract_keys: function(data: string, kv_splitter: pattern): string_vec;
## Creates a URL from an :bro:type:`HTTP::Info` record. This should handle
## edge cases such as proxied requests appropriately.
##
## rec: An :bro:type:`HTTP::Info` record.
##
## Returns: A URL, not prefixed by "http://".
global build_url: function(rec: Info): string;
## Creates a URL from an :bro:type:`HTTP::Info` record. This should handle
## edge cases such as proxied requests appropriately.
##
## rec: An :bro:type:`HTTP::Info` record.
##
## Returns: A URL prefixed with "http://".
global build_url_http: function(rec: Info): string;
}
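# A sketch of extract_keys: pull the key names out of a cookie header
# value (the input string is illustrative).
event bro_init()
	{
	local keys = HTTP::extract_keys("lang=en; uid=abc123; seen=1", /;[[:blank:]]*/);
	for ( i in keys )
		print keys[i]; # prints lang, uid and seen
	}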

View file

@ -5,8 +5,9 @@
##! but that connection will actually be between B and C which could be
##! analyzed on a different worker.
##!
# Example line from IRC server indicating that the DCC SEND is about to start:
# PRIVMSG my_nick :^ADCC SEND whateverfile.zip 3640061780 1026 41709^A
@load ./main
@load base/utils/files
@ -14,24 +15,25 @@
module IRC;
export {
redef enum Tag += { EXTRACTED_FILE };
## Pattern of file mime types to extract from IRC DCC file transfers.
const extract_file_types = /NO_DEFAULT/ &redef;
## On-disk prefix for files to be extracted from IRC DCC file transfers.
const extraction_prefix = "irc-dcc-item" &redef;
redef record Info += {
## DCC filename requested.
dcc_file_name: string &log &optional;
## Size of the DCC transfer as indicated by the sender.
dcc_file_size: count &log &optional;
## Sniffed mime type of the file.
dcc_mime_type: string &log &optional;
## The file handle for the file to be extracted.
extraction_file: file &log &optional;
## A boolean to indicate if the current file transfer should be extracted.
extract_file: bool &default=F;
## The number of files that have been extracted during the session.
num_extracted_files: count &default=0;
@ -54,8 +56,10 @@ event file_transferred(c: connection, prefix: string, descr: string,
if ( extract_file_types == irc$dcc_mime_type )
{
irc$extract_file = T;
}
if ( irc$extract_file )
{
local suffix = fmt("%d.dat", ++irc$num_extracted_files);
local fname = generate_extraction_filename(extraction_prefix, c, suffix);
irc$extraction_file = open(fname);
@ -76,7 +80,7 @@ event file_transferred(c: connection, prefix: string, descr: string,
Log::write(IRC::LOG, irc);
irc$command = tmp;
if ( irc?$extraction_file )
set_contents_file(id, CONTENTS_RESP, irc$extraction_file);
# Delete these values in case another DCC transfer

View file

@ -1,36 +1,38 @@
##! Implements the core IRC analysis support. The logging model is to log
##! IRC commands along with the associated response and some additional
##! metadata about the connection if it's available.
module IRC;
export {
redef enum Log::ID += { LOG };
type Tag: enum {
EMPTY
};
type Info: record {
## Timestamp when the command was seen.
ts: time &log;
uid: string &log;
id: conn_id &log;
## Nick name given for the connection.
nick: string &log &optional;
## User name given for the connection.
user: string &log &optional;
## Channels that the user has joined.
channels: set[string] &log &optional;
## Command given by the client.
command: string &log &optional;
## Value for the command given by the client.
value: string &log &optional;
## Any additional data for the command.
addl: string &log &optional;
tags: set[Tag] &log;
};
## Event that can be handled to access the IRC record as it is sent on
## to the logging framework.
global irc_log: event(rec: Info);
}
redef record connection += {
## IRC session information.
irc: Info &optional;
};

View file

@ -14,15 +14,17 @@
module SSH;
export {
## The SSH protocol logging stream identifier.
redef enum Log::ID += { LOG };
redef enum Notice::Type += {
## Indicates that a heuristically detected "successful" SSH
## authentication occurred.
Login
};
type Info: record {
## Time when the SSH connection began.
ts: time &log;
uid: string &log;
id: conn_id &log;
@ -34,11 +36,11 @@ export {
## would be set for the opposite situation.
# TODO: handle local-local and remote-remote better.
direction: Direction &log &optional;
## Software string given by the client.
client: string &log &optional;
## Software string given by the server.
server: string &log &optional;
## Amount of data returned from the server. This is currently
## the only measure of the success heuristic and it is logged to
## assist analysts looking at the logs to make their own determination
## about the success on a case-by-case basis.
@ -48,8 +50,8 @@ export {
done: bool &default=F;
};
## The size in bytes of data sent by the server at which the SSH
## connection is presumed to be successful.
const authentication_data_size = 5500 &redef;
## If true, we tell the event engine to not look at further data
@ -58,14 +60,16 @@ export {
## kinds of analyses (e.g., tracking connection size).
const skip_processing_after_detection = F &redef;
## Event that is generated when the heuristic thinks that a login
## was successful.
global heuristic_successful_login: event(c: connection);
## Event that is generated when the heuristic thinks that a login
## failed.
global heuristic_failed_login: event(c: connection);
## Event that can be handled to access the :bro:type:`SSH::Info`
## record as it is sent on to the logging framework.
global log_ssh: event(rec: Info);
}
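# A sketch: react to the heuristic events declared above from a custom
# script.
event SSH::heuristic_successful_login(c: connection)
	{
	print fmt("apparent SSH login from %s to %s", c$id$orig_h, c$id$resp_h);
	}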

View file

@ -1,18 +1,65 @@
module SSL;
export {
const SSLv2 = 0x0002;
const SSLv3 = 0x0300;
const TLSv10 = 0x0301;
const TLSv11 = 0x0302;
const TLSv12 = 0x0303;
## Mapping between the constants and string values for SSL/TLS versions.
const version_strings: table[count] of string = {
[SSLv2] = "SSLv2",
[SSLv3] = "SSLv3",
[TLSv10] = "TLSv10",
[TLSv11] = "TLSv11",
[TLSv12] = "TLSv12",
} &default="UNKNOWN";
## Mapping between numeric codes and human readable strings for alert
## levels.
const alert_levels: table[count] of string = {
[1] = "warning",
[2] = "fatal",
} &default=function(i: count):string { return fmt("unknown-%d", i); };
## Mapping between numeric codes and human readable strings for alert
## descriptions.
const alert_descriptions: table[count] of string = {
[0] = "close_notify",
[10] = "unexpected_message",
[20] = "bad_record_mac",
[21] = "decryption_failed",
[22] = "record_overflow",
[30] = "decompression_failure",
[40] = "handshake_failure",
[41] = "no_certificate",
[42] = "bad_certificate",
[43] = "unsupported_certificate",
[44] = "certificate_revoked",
[45] = "certificate_expired",
[46] = "certificate_unknown",
[47] = "illegal_parameter",
[48] = "unknown_ca",
[49] = "access_denied",
[50] = "decode_error",
[51] = "decrypt_error",
[60] = "export_restriction",
[70] = "protocol_version",
[71] = "insufficient_security",
[80] = "internal_error",
[90] = "user_canceled",
[100] = "no_renegotiation",
[110] = "unsupported_extension",
[111] = "certificate_unobtainable",
[112] = "unrecognized_name",
[113] = "bad_certificate_status_response",
[114] = "bad_certificate_hash_value",
[115] = "unknown_psk_identity",
} &default=function(i: count):string { return fmt("unknown-%d", i); };
## Mapping between numeric codes and human readable strings for SSL/TLS
## extensions.
# More information can be found here:
# http://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xml
const extensions: table[count] of string = {
[0] = "server_name",
@ -31,10 +78,11 @@ export {
[13] = "signature_algorithms",
[14] = "use_srtp",
[35] = "SessionTicket TLS",
[13172] = "next_protocol_negotiation",
[65281] = "renegotiation_info"
} &default=function(i: count):string { return fmt("unknown-%d", i); };
# SSLv2
const SSLv20_CK_RC4_128_WITH_MD5 = 0x010080;
const SSLv20_CK_RC4_128_EXPORT40_WITH_MD5 = 0x020080;
const SSLv20_CK_RC2_128_CBC_WITH_MD5 = 0x030080;
@ -43,7 +91,7 @@ export {
const SSLv20_CK_DES_64_CBC_WITH_MD5 = 0x060040;
const SSLv20_CK_DES_192_EDE3_CBC_WITH_MD5 = 0x0700C0;
# TLS
const TLS_NULL_WITH_NULL_NULL = 0x0000;
const TLS_RSA_WITH_NULL_MD5 = 0x0001;
const TLS_RSA_WITH_NULL_SHA = 0x0002;
@ -260,13 +308,11 @@ export {
const SSL_RSA_WITH_DES_CBC_MD5 = 0xFF82;
const SSL_RSA_WITH_3DES_EDE_CBC_MD5 = 0xFF83;
const TLS_EMPTY_RENEGOTIATION_INFO_SCSV = 0x00FF;
## This is a table of all known cipher specs. It can be used for
## detecting unknown ciphers and for converting the cipher spec constants
## into a human readable format.
const cipher_desc: table[count] of string = {
[SSLv20_CK_RC4_128_EXPORT40_WITH_MD5] =
"SSLv20_CK_RC4_128_EXPORT40_WITH_MD5",
[SSLv20_CK_RC4_128_WITH_MD5] = "SSLv20_CK_RC4_128_WITH_MD5",
@ -278,7 +324,6 @@ export {
"SSLv20_CK_DES_192_EDE3_CBC_WITH_MD5",
[SSLv20_CK_DES_64_CBC_WITH_MD5] = "SSLv20_CK_DES_64_CBC_WITH_MD5",
[TLS_NULL_WITH_NULL_NULL] = "TLS_NULL_WITH_NULL_NULL",
[TLS_RSA_WITH_NULL_MD5] = "TLS_RSA_WITH_NULL_MD5",
[TLS_RSA_WITH_NULL_SHA] = "TLS_RSA_WITH_NULL_SHA",
@ -491,7 +536,8 @@ export {
[SSL_RSA_FIPS_WITH_DES_CBC_SHA_2] = "SSL_RSA_FIPS_WITH_DES_CBC_SHA_2",
[SSL_RSA_FIPS_WITH_3DES_EDE_CBC_SHA_2] = "SSL_RSA_FIPS_WITH_3DES_EDE_CBC_SHA_2",
} &default="UNKNOWN";
## Mapping between the constants and string values for SSL/TLS errors.
const x509_errors: table[count] of string = {
[0] = "ok",
[1] = "unable to get issuer cert",
@ -526,8 +572,7 @@ export {
[30] = "akid issuer serial mismatch",
[31] = "keyusage no certsign",
[32] = "unable to get crl issuer",
[33] = "unhandled critical extension"
[33] = "unhandled critical extension",
};
}
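# A sketch of using these tables from another script: name the version
# and cipher a server chose in its hello.
event ssl_server_hello(c: connection, version: count, possible_ts: time, session_id: string, cipher: count, comp_method: count)
	{
	print fmt("%s chose %s with %s", c$id$resp_h,
	          SSL::version_strings[version], SSL::cipher_desc[cipher]);
	}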

View file

@ -1,3 +1,6 @@
##! Base SSL analysis script. This script logs information about the SSL/TLS
##! handshaking and encryption establishment process.
@load ./consts
module SSL;
@ -6,31 +9,45 @@ export {
redef enum Log::ID += { LOG };
type Info: record {
## Time when the SSL connection began.
ts: time &log;
uid: string &log;
id: conn_id &log;
## SSL/TLS version the server offered.
version: string &log &optional;
## SSL/TLS cipher suite the server chose.
cipher: string &log &optional;
## Value of the Server Name Indicator SSL/TLS extension. It
## indicates the server name that the client was requesting.
server_name: string &log &optional;
## Session ID offered by the client for session resumption.
session_id: string &log &optional;
## Subject of the X.509 certificate offered by the server.
subject: string &log &optional;
## NotValidBefore field value from the server certificate.
not_valid_before: time &log &optional;
## NotValidAfter field value from the server certificate.
not_valid_after: time &log &optional;
## Last alert that was seen during the connection.
last_alert: string &log &optional;
## Full binary server certificate stored in DER format.
cert: string &optional;
## Chain of certificates offered by the server to validate its
## complete signing chain.
cert_chain: vector of string &optional;
## The analyzer ID used for the analyzer instance attached
## to each connection. It is not used for logging since it's a
## meaningless arbitrary number.
analyzer_id: count &optional;
};
## The default root CA bundle. By loading the
## mozilla-ca-list.bro script it will be set to Mozilla's root CA list.
const root_certs: table[string] of string = {} &redef;
## If true, detach the SSL analyzer from the connection to prevent
## continuing to process encrypted traffic. Helps with performance
## (especially with large file transfers).
const disable_analyzer_after_detection = T &redef;
@ -40,12 +57,9 @@ export {
## utility.
const openssl_util = "openssl" &redef;
## Event that can be handled to access the SSL
## record as it is sent on to the logging framework.
global log_ssl: event(rec: Info);
}
redef record connection += {
@ -72,6 +86,11 @@ redef capture_filters += {
["xmpps"] = "tcp port 5223",
};
const ports = {
443/tcp, 563/tcp, 585/tcp, 614/tcp, 636/tcp,
989/tcp, 990/tcp, 992/tcp, 993/tcp, 995/tcp, 5223/tcp
};
redef dpd_config += {
[[ANALYZER_SSL]] = [$ports = ports]
};
@ -86,7 +105,7 @@ function set_session(c: connection)
if ( ! c?$ssl )
c$ssl = [$ts=network_time(), $uid=c$uid, $id=c$id, $cert_chain=vector()];
}
function finish(c: connection)
{
Log::write(SSL::LOG, c$ssl);
@ -98,29 +117,33 @@ function finish(c: connection)
event ssl_client_hello(c: connection, version: count, possible_ts: time, session_id: string, ciphers: count_set) &priority=5
{
set_session(c);
# Save the session_id if there is one set.
if ( session_id != /^\x00{32}$/ )
c$ssl$session_id = bytestring_to_hexstr(session_id);
}
event ssl_server_hello(c: connection, version: count, possible_ts: time, session_id: string, cipher: count, comp_method: count) &priority=5
{
set_session(c);
c$ssl$version = version_strings[version];
c$ssl$cipher = cipher_desc[cipher];
}
event x509_certificate(c: connection, is_orig: bool, cert: X509, chain_idx: count, chain_len: count, der_cert: string) &priority=5
{
set_session(c);
# We aren't doing anything with client certificates yet.
if ( is_orig )
return;
if ( chain_idx == 0 )
{
# Save the primary cert.
c$ssl$cert = der_cert;
# Also save other certificate information about the primary cert.
c$ssl$subject = cert$subject;
c$ssl$not_valid_before = cert$not_valid_before;
@ -132,20 +155,27 @@ event x509_certificate(c: connection, cert: X509, is_server: bool, chain_idx: co
c$ssl$cert_chain[|c$ssl$cert_chain|] = der_cert;
}
}
event ssl_extension(c: connection, is_orig: bool, code: count, val: string) &priority=5
{
set_session(c);
if ( is_orig && extensions[code] == "server_name" )
c$ssl$server_name = sub_bytes(val, 6, |val|);
}
event ssl_alert(c: connection, is_orig: bool, level: count, desc: count) &priority=5
{
set_session(c);
c$ssl$last_alert = alert_descriptions[desc];
}
event ssl_established(c: connection) &priority=5
{
set_session(c);
}
event ssl_established(c: connection) &priority=-5
{
finish(c);
@ -163,4 +193,4 @@ event protocol_violation(c: connection, atype: count, aid: count,
{
if ( c?$ssl )
finish(c);
}

View file

@ -1,6 +1,9 @@
##! Constants definitions for syslog.
module Syslog;
export {
## Mapping between the constants and string values for syslog facilities.
const facility_codes: table[count] of string = {
[0] = "KERN",
[1] = "USER",
@ -27,7 +30,8 @@ export {
[22] = "LOCAL6",
[23] = "LOCAL7",
} &default=function(c: count): string { return fmt("?-%d", c); };
## Mapping between the constants and string values for syslog severities.
const severity_codes: table[count] of string = {
[0] = "EMERG",
[1] = "ALERT",

View file

@ -1,4 +1,5 @@
##! Core script support for logging syslog messages. This script represents
##! one syslog message as one logged record.
@load ./consts
@ -8,19 +9,23 @@ export {
redef enum Log::ID += { LOG };
type Info: record {
## Timestamp of when the syslog message was seen.
ts: time &log;
uid: string &log;
id: conn_id &log;
## Protocol over which the message was seen.
proto: transport_proto &log;
## Syslog facility for the message.
facility: string &log;
## Syslog severity for the message.
severity: string &log;
## The plain text message.
message: string &log;
};
const ports = { 514/udp } &redef;
}
redef capture_filters += { ["syslog"] = "port 514" };
redef dpd_config += { [ANALYZER_SYSLOG_BINPAC] = [$ports = ports] };
redef likely_server_ports += { 514/udp };
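# An illustrative usage sketch for a site script (5514/udp is an
# arbitrary example port, not a standard): also treat an alternate
# port as syslog.
redef Syslog::ports += { 5514/udp };
redef capture_filters += { ["syslog-alt"] = "udp port 5514" };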

View file

@ -18,7 +18,7 @@ export {
const local_nets: set[subnet] &redef;
## This is used for retrieving the subnet when you have multiple
## :bro:id:`Site::local_nets`. A membership query can be done with an
## :bro:type:`addr` and the table will yield the subnet it was found
## within.
global local_nets_table: table[subnet] of subnet = {};
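# A sketch: an address membership query against the table yields the
# enclosing local subnet (the address is illustrative).
event bro_init()
	{
	local a = 10.1.2.3;
	if ( a in Site::local_nets_table )
		print Site::local_nets_table[a];
	}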