Mirror of https://github.com/zeek/zeek.git (synced 2025-10-13 12:08:20 +00:00)

commit 3546d93f36: Merging master.

1015 changed files with 214684 additions and 4605 deletions
@@ -77,6 +77,9 @@ export {
node_type: NodeType;
## The IP address of the cluster node.
ip: addr;
## If the *ip* field is a non-global IPv6 address, this field
## can specify a particular :rfc:`4007` ``zone_id``.
zone_id: string &default="";
## The port to which this local node can connect when
## establishing communication.
p: port;

@@ -13,8 +13,12 @@
## Turn off remote logging since this is the manager and should only log here.
redef Log::enable_remote_logging = F;

## Log rotation interval.
redef Log::default_rotation_interval = 1 hrs;

## Alarm summary mail interval.
redef Log::default_mail_alarms_interval = 24 hrs;

## Use the cluster's archive logging script.
redef Log::default_rotation_postprocessor_cmd = "archive-log";

@@ -19,23 +19,26 @@ event bro_init() &priority=9
# Connections from the control node for runtime control and update events.
# Every node in a cluster is eligible for control from this host.
if ( n$node_type == CONTROL )
Communication::nodes["control"] = [$host=n$ip, $connect=F,
$class="control", $events=control_events];
Communication::nodes["control"] = [$host=n$ip, $zone_id=n$zone_id,
$connect=F, $class="control",
$events=control_events];

if ( me$node_type == MANAGER )
{
if ( n$node_type == WORKER && n$manager == node )
Communication::nodes[i] =
[$host=n$ip, $connect=F,
[$host=n$ip, $zone_id=n$zone_id, $connect=F,
$class=i, $events=worker2manager_events, $request_logs=T];

if ( n$node_type == PROXY && n$manager == node )
Communication::nodes[i] =
[$host=n$ip, $connect=F,
[$host=n$ip, $zone_id=n$zone_id, $connect=F,
$class=i, $events=proxy2manager_events, $request_logs=T];

if ( n$node_type == TIME_MACHINE && me?$time_machine && me$time_machine == i )
Communication::nodes["time-machine"] = [$host=nodes[i]$ip, $p=nodes[i]$p,
Communication::nodes["time-machine"] = [$host=nodes[i]$ip,
$zone_id=nodes[i]$zone_id,
$p=nodes[i]$p,
$connect=T, $retry=1min,
$events=tm2manager_events];
}

@@ -44,7 +47,8 @@ event bro_init() &priority=9
{
if ( n$node_type == WORKER && n$proxy == node )
Communication::nodes[i] =
[$host=n$ip, $connect=F, $class=i, $sync=T, $auth=T, $events=worker2proxy_events];
[$host=n$ip, $zone_id=n$zone_id, $connect=F, $class=i,
$sync=T, $auth=T, $events=worker2proxy_events];

# accepts connections from the previous one.
# (This is not ideal for setups with many proxies)

@@ -53,16 +57,18 @@ event bro_init() &priority=9
{
if ( n?$proxy )
Communication::nodes[i]
= [$host=n$ip, $p=n$p,
= [$host=n$ip, $zone_id=n$zone_id, $p=n$p,
$connect=T, $auth=F, $sync=T, $retry=1mins];
else if ( me?$proxy && me$proxy == i )
Communication::nodes[me$proxy]
= [$host=nodes[i]$ip, $connect=F, $auth=T, $sync=T];
= [$host=nodes[i]$ip, $zone_id=nodes[i]$zone_id,
$connect=F, $auth=T, $sync=T];
}

# Finally the manager, to send it status updates.
if ( n$node_type == MANAGER && me$manager == i )
Communication::nodes["manager"] = [$host=nodes[i]$ip,
$zone_id=nodes[i]$zone_id,
$p=nodes[i]$p,
$connect=T, $retry=1mins,
$class=node,

@@ -72,6 +78,7 @@ event bro_init() &priority=9
{
if ( n$node_type == MANAGER && me$manager == i )
Communication::nodes["manager"] = [$host=nodes[i]$ip,
$zone_id=nodes[i]$zone_id,
$p=nodes[i]$p,
$connect=T, $retry=1mins,
$class=node,

@@ -79,6 +86,7 @@ event bro_init() &priority=9

if ( n$node_type == PROXY && me$proxy == i )
Communication::nodes["proxy"] = [$host=nodes[i]$ip,
$zone_id=nodes[i]$zone_id,
$p=nodes[i]$p,
$connect=T, $retry=1mins,
$sync=T, $class=node,

@@ -87,6 +95,7 @@ event bro_init() &priority=9
if ( n$node_type == TIME_MACHINE &&
me?$time_machine && me$time_machine == i )
Communication::nodes["time-machine"] = [$host=nodes[i]$ip,
$zone_id=nodes[i]$zone_id,
$p=nodes[i]$p,
$connect=T,
$retry=1min,

@@ -2,6 +2,7 @@
##! and/or transfer events.

@load base/frameworks/packet-filter
@load base/utils/addrs

module Communication;

@@ -9,17 +10,31 @@ export {

## The communication logging stream identifier.
redef enum Log::ID += { LOG };

## Which interface to listen on (0.0.0.0 for any interface).

## Which interface to listen on. The addresses ``0.0.0.0`` and ``[::]``
## are wildcards.
const listen_interface = 0.0.0.0 &redef;

## Which port to listen on.
const listen_port = 47757/tcp &redef;

## This defines if a listening socket should use SSL.
const listen_ssl = F &redef;

## Default compression level. Compression level is 0-9, with 0 = no
## Defines if a listening socket can bind to IPv6 addresses.
const listen_ipv6 = F &redef;

## If :bro:id:`Communication::listen_interface` is a non-global
## IPv6 address and requires a specific :rfc:`4007` ``zone_id``,
## it can be specified here.
const listen_ipv6_zone_id = "" &redef;

## Defines the interval at which to retry binding to
## :bro:id:`Communication::listen_interface` on
## :bro:id:`Communication::listen_port` if it's already in use.
const listen_retry = 30 secs &redef;

## Default compression level. Compression level is 0-9, with 0 = no
## compression.
global compression_level = 0 &redef;

@@ -27,7 +42,7 @@ export {
type Info: record {
## The network time at which a communication event occurred.
ts: time &log;
## The peer name (if any) for which a communication event is concerned.
## The peer name (if any) with which a communication event is concerned.
peer: string &log &optional;
## Where the communication event message originated from, that is,
## either from the scripting layer or inside the Bro process.

@@ -51,7 +66,11 @@ export {
type Node: record {
## Remote address.
host: addr;

## If the *host* field is a non-global IPv6 address, this field
## can specify a particular :rfc:`4007` ``zone_id``.
zone_id: string &optional;

## Port of the remote Bro communication endpoint if we are initiating
## the connection based on the :bro:id:`connect` field.
p: port &optional;

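A minimal sketch of how the new *zone_id* field might be used for a peer
listening on a link-local IPv6 address (the node name, address, and interface
below are hypothetical, not part of this commit):

event bro_init()
	{
	Communication::nodes["peer-ll"] = [$host=[fe80::1], $zone_id="eth0",
	                                   $p=47757/tcp, $connect=T, $retry=1min];
	}
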
@@ -101,7 +120,7 @@ export {

## The remote peer.
peer: event_peer &optional;

## Indicates the status of the node.
connected: bool &default = F;
};

@@ -144,7 +163,7 @@ event bro_init() &priority=5

function do_script_log_common(level: count, src: count, msg: string)
{
Log::write(Communication::LOG, [$ts = network_time(),
$level = (level == REMOTE_LOG_INFO ? "info" : "error"),
$src_name = src_names[src],
$peer = get_event_peer()$descr,

@@ -160,7 +179,7 @@ event remote_log(level: count, src: count, msg: string)
# This is a core generated event.
event remote_log_peer(p: event_peer, level: count, src: count, msg: string)
{
local rmsg = fmt("[#%d/%s:%d] %s", p$id, p$host, p$p, msg);
local rmsg = fmt("[#%d/%s:%d] %s", p$id, addr_to_uri(p$host), p$p, msg);
do_script_log_common(level, src, rmsg);
}

@@ -178,10 +197,11 @@ function connect_peer(peer: string)
p = node$p;

local class = node?$class ? node$class : "";
local id = connect(node$host, p, class, node$retry, node$ssl);

local zone_id = node?$zone_id ? node$zone_id : "";
local id = connect(node$host, zone_id, p, class, node$retry, node$ssl);

if ( id == PEER_ID_NONE )
Log::write(Communication::LOG, [$ts = network_time(),
$peer = get_event_peer()$descr,
$message = "can't trigger connect"]);
pending_peers[id] = node;

@@ -317,7 +337,7 @@ event bro_init() &priority = -10 # let others modify nodes
{
if ( |nodes| > 0 )
enable_communication();

for ( tag in nodes )
{
if ( ! nodes[tag]$connect )

@@ -11,6 +11,10 @@ export {
## The port of the host that will be controlled.
const host_port = 0/tcp &redef;

## If :bro:id:`Control::host` is a non-global IPv6 address and
## requires a specific :rfc:`4007` ``zone_id``, it can be set here.
const zone_id = "" &redef;

## The command to be executed. It's typically set on the
## command line.
const cmd = "" &redef;

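The matching Control options for such a host could then plausibly be set as
(values hypothetical, not part of this commit):

redef Control::host = [fe80::1234];
redef Control::zone_id = "eth0";
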
@@ -149,3 +149,64 @@ signature dpd_ssl_client
payload /^(\x16\x03[\x00\x01\x02]..\x01...\x03[\x00\x01\x02]|...?\x01[\x00\x01\x02][\x02\x03]).*/
tcp-state originator
}

signature dpd_ayiya {
ip-proto == udp
payload /^..\x11\x29/
enable "ayiya"
}

signature dpd_teredo {
ip-proto == udp
payload /^(\x00\x00)|(\x00\x01)|([\x60-\x6f])/
enable "teredo"
}

signature dpd_socks4_client {
ip-proto == tcp
# '32' is a rather arbitrary max length for the user name.
payload /^\x04[\x01\x02].{0,32}\x00/
tcp-state originator
}

signature dpd_socks4_server {
ip-proto == tcp
requires-reverse-signature dpd_socks4_client
payload /^\x00[\x5a\x5b\x5c\x5d]/
tcp-state responder
enable "socks"
}

signature dpd_socks4_reverse_client {
ip-proto == tcp
# '32' is a rather arbitrary max length for the user name.
payload /^\x04[\x01\x02].{0,32}\x00/
tcp-state responder
}

signature dpd_socks4_reverse_server {
ip-proto == tcp
requires-reverse-signature dpd_socks4_reverse_client
payload /^\x00[\x5a\x5b\x5c\x5d]/
tcp-state originator
enable "socks"
}

signature dpd_socks5_client {
ip-proto == tcp
# Watch for a few authentication methods to reduce false positives.
payload /^\x05.[\x00\x01\x02]/
tcp-state originator
}

signature dpd_socks5_server {
ip-proto == tcp
requires-reverse-signature dpd_socks5_client
# Watch for a single authentication method to be chosen by the server or
# the server to indicate that no authentication is required.
payload /^\x05(\x00|\x01[\x00\x01\x02])/
tcp-state responder
enable "socks"
}

@@ -3,8 +3,7 @@

module DPD;

## Add the DPD signatures to the signature framework.
redef signature_files += "base/frameworks/dpd/dpd.sig";
@load-sigs ./dpd.sig

export {
## Add the DPD logging stream identifier.

@@ -105,5 +104,8 @@ event protocol_violation(c: connection, atype: count, aid: count,
reason: string) &priority=-5
{
if ( c?$dpd )
{
Log::write(DPD::LOG, c$dpd);
delete c$dpd;
}
}

scripts/base/frameworks/input/__load__.bro (new file, 5 lines)
@@ -0,0 +1,5 @@
@load ./main
@load ./readers/ascii
@load ./readers/raw
@load ./readers/benchmark

scripts/base/frameworks/input/main.bro (new file, 158 lines)
@@ -0,0 +1,158 @@
##! The input framework provides a way to read previously stored data either
##! as an event stream or into a bro table.

module Input;

export {

## The default input reader used. Defaults to `READER_ASCII`.
const default_reader = READER_ASCII &redef;

## The default reader mode used. Defaults to `MANUAL`.
const default_mode = MANUAL &redef;

## Flag that controls if the input framework accepts records
## that contain types that are not supported (at the moment
## file and function). If true, the input framework will
## warn in these cases, but continue. If false, it will
## abort. Defaults to false (abort).
const accept_unsupported_types = F &redef;

## TableFilter description type used for the `table` method.
type TableDescription: record {
## Common definitions for tables and events

## String that allows the reader to find the source.
## For `READER_ASCII`, this is the filename.
source: string;

## Reader to use for this stream.
reader: Reader &default=default_reader;

## Read mode to use for this stream.
mode: Mode &default=default_mode;

## Descriptive name. Used to remove a stream at a later time.
name: string;

# Special definitions for tables

## Table which will receive the data read by the input framework.
destination: any;

## Record that defines the values used as the index of the table.
idx: any;

## Record that defines the values used as the elements of the table.
## If val is undefined, destination has to be a set.
val: any &optional;

## Defines if the value of the table is a record (default), or a single value. Val
## can only contain one element when this is set to false.
want_record: bool &default=T;

## The event that is raised each time a value is added to, changed in, or removed
## from the table. The event will receive an Input::Event enum as the first
## argument, the idx record as the second argument and the value (record) as the
## third argument.
ev: any &optional; # event containing idx, val as values.

## Predicate function that can decide if an insertion, update or removal should
## really be executed. Parameters are the same as for the event. If true is
## returned, the update is performed. If false is returned, it is skipped.
pred: function(typ: Input::Event, left: any, right: any): bool &optional;

## A key/value table that will be passed on to the reader.
## Interpretation of the values is left to the reader, but
## usually they will be used for configuration purposes.
config: table[string] of string &default=table();
};

## EventFilter description type used for the `event` method.
type EventDescription: record {
## Common definitions for tables and events

## String that allows the reader to find the source.
## For `READER_ASCII`, this is the filename.
source: string;

## Reader to use for this stream.
reader: Reader &default=default_reader;

## Read mode to use for this stream.
mode: Mode &default=default_mode;

## Descriptive name. Used to remove a stream at a later time.
name: string;

# Special definitions for events

## Record describing the fields to be retrieved from the source input.
fields: any;

## If want_record is false, the event receives each value in fields as a separate argument.
## If it is set to true (default), the event receives all fields in a single record value.
want_record: bool &default=T;

## The event that is raised each time a new line is received from the reader.
## The event will receive an Input::Event enum as the first element, and the fields as the following arguments.
ev: any;

## A key/value table that will be passed on to the reader.
## Interpretation of the values is left to the reader, but
## usually they will be used for configuration purposes.
config: table[string] of string &default=table();
};

## Create a new table input from a given source. Returns true on success.
##
## description: `TableDescription` record describing the source.
global add_table: function(description: Input::TableDescription) : bool;

## Create a new event input from a given source. Returns true on success.
##
## description: `EventDescription` record describing the source.
global add_event: function(description: Input::EventDescription) : bool;

## Remove an input stream. Returns true on success and false if the named stream was
## not found.
##
## id: string value identifying the stream to be removed.
global remove: function(id: string) : bool;

## Forces the current input to be checked for changes.
## Returns true on success and false if the named stream was not found.
##
## id: string value identifying the stream.
global force_update: function(id: string) : bool;

## Event that is called when the end of a data source has been reached, including
## after an update.
global end_of_data: event(name: string, source: string);
}

@load base/input.bif

module Input;

function add_table(description: Input::TableDescription) : bool
	{
	return __create_table_stream(description);
	}

function add_event(description: Input::EventDescription) : bool
	{
	return __create_event_stream(description);
	}

function remove(id: string) : bool
	{
	return __remove_stream(id);
	}

function force_update(id: string) : bool
	{
	return __force_update(id);
	}

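As a quick sketch of how this new API might be used to read a file into a
table (the file name and record types below are hypothetical, not part of
this commit):

module Example;

type Idx: record {
	ip: addr;
};

type Val: record {
	reason: string;
};

global blacklist: table[addr] of Val = table();

event bro_init()
	{
	Input::add_table([$source="blacklist.file", $name="blacklist",
	                  $idx=Idx, $val=Val, $destination=blacklist]);
	}
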
scripts/base/frameworks/input/readers/ascii.bro (new file, 21 lines)
@@ -0,0 +1,21 @@
##! Interface for the ASCII input reader.
##!
##! The defaults are set to match Bro's ASCII output.

module InputAscii;

export {
## Separator between fields.
## Please note that the separator has to be exactly one character long.
const separator = "\t" &redef;

## Separator between set elements.
## Please note that the separator has to be exactly one character long.
const set_separator = "," &redef;

## String to use for empty fields.
const empty_field = "(empty)" &redef;

## String to use for an unset &optional field.
const unset_field = "-" &redef;
}
scripts/base/frameworks/input/readers/benchmark.bro (new file, 23 lines)
@@ -0,0 +1,23 @@
##! Interface for the benchmark input reader.

module InputBenchmark;

export {
## Multiplication factor for each second.
const factor = 1.0 &redef;

## Spread factor between lines.
const spread = 0 &redef;

## Spreading where usleep = 1000000 / autospread * num_lines.
const autospread = 0.0 &redef;

## Addition factor for each heartbeat.
const addfactor = 0 &redef;

## Stop spreading at x lines per heartbeat.
const stopspreadat = 0 &redef;

## 1 -> enable timed spreading.
const timedspread = 0.0 &redef;
}
scripts/base/frameworks/input/readers/raw.bro (new file, 9 lines)
@@ -0,0 +1,9 @@
##! Interface for the raw input reader.

module InputRaw;

export {
## Separator between input records.
## Please note that the separator has to be exactly one character long.
const record_separator = "\n" &redef;
}

@@ -1 +1,11 @@
@load ./main

# The cluster framework must be loaded first.
@load base/frameworks/cluster

@if ( Cluster::is_enabled() )
@load ./cluster
@endif

# This needs cluster support to only read on the manager.
@load ./input

scripts/base/frameworks/intel/cluster.bro (new file, 61 lines)
@@ -0,0 +1,61 @@
##! Cluster transparency support for the intelligence framework. This is mostly
##! oriented toward distributing intelligence information across clusters.

@load base/frameworks/cluster
@load ./input

module Intel;

redef record Item += {
	## This field is used internally for cluster transparency to avoid
	## re-dispatching intelligence items over and over from workers.
	first_dispatch: bool &default=T;
};

# If this process is not a manager process, we don't want the full metadata.
@if ( Cluster::local_node_type() != Cluster::MANAGER )
redef have_full_data = F;
@endif

global cluster_new_item: event(item: Item);

# Primary intelligence distribution comes from the manager.
redef Cluster::manager2worker_events += /^Intel::(cluster_new_item)$/;
# If a worker finds intelligence and adds it, it should share it back to the manager.
redef Cluster::worker2manager_events += /^Intel::(cluster_new_item|match_no_items)$/;

@if ( Cluster::local_node_type() == Cluster::MANAGER )
event Intel::match_no_items(s: Seen) &priority=5
	{
	event Intel::match(s, Intel::get_items(s));
	}

event remote_connection_handshake_done(p: event_peer)
	{
	# When a worker connects, send it the complete minimal data store.
	# It will be kept up to date after this by the cluster_new_item event.
	if ( Cluster::nodes[p$descr]$node_type == Cluster::WORKER )
		{
		send_id(p, "Intel::min_data_store");
		}
	}
@endif

event Intel::cluster_new_item(item: Intel::Item) &priority=5
	{
	# Ignore locally generated events to avoid event storms.
	if ( is_remote_event() )
		Intel::insert(item);
	}

event Intel::new_item(item: Intel::Item) &priority=5
	{
	# The cluster manager always rebroadcasts intelligence.
	# Workers redistribute it if it was locally generated.
	if ( Cluster::local_node_type() == Cluster::MANAGER ||
	     item$first_dispatch )
		{
		item$first_dispatch = F;
		event Intel::cluster_new_item(item);
		}
	}
scripts/base/frameworks/intel/input.bro (new file, 33 lines)
@@ -0,0 +1,33 @@
@load ./main

module Intel;

export {
	## Intelligence files that will be read off disk. The files are
	## re-read every time they are updated, so updates must be made
	## atomically with "mv" instead of writing the file in place.
	const read_files: set[string] = {} &redef;
}

event Intel::read_entry(desc: Input::EventDescription, tpe: Input::Event, item: Intel::Item)
	{
	Intel::insert(item);
	}

event bro_init() &priority=5
	{
	if ( ! Cluster::is_enabled() ||
	     Cluster::local_node_type() == Cluster::MANAGER )
		{
		for ( a_file in read_files )
			{
			Input::add_event([$source=a_file,
			                  $reader=Input::READER_ASCII,
			                  $mode=Input::REREAD,
			                  $name=cat("intel-", a_file),
			                  $fields=Intel::Item,
			                  $ev=Intel::read_entry]);
			}
		}
	}

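Pointing the framework at an intelligence file could then plausibly look
like this (the path is hypothetical, not part of this commit):

redef Intel::read_files += { "/somewhere/intel-feed.dat" };
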
@@ -1,323 +1,345 @@
##! The intelligence framework provides a way to store and query IP addresses,
##! strings (with a subtype), and numeric (with a subtype) data. Metadata can
##! also be associated with the intelligence like tags which are arbitrary
##! strings, time values, and longer descriptive strings.

# Example string subtypes:
# url
# email
# domain
# software
# user_name
# file_name
# file_md5
# x509_md5

# Example tags:
# infrastructure
# malicious
# sensitive
# canary
# friend
##! and strings (with a str_type). Metadata can
##! also be associated with the intelligence, like for making more informed
##! decisions about matching and handling of intelligence.

@load base/frameworks/notice

module Intel;

export {
## The intel logging stream identifier.
redef enum Log::ID += { LOG };

redef enum Notice::Type += {
## This notice should be used in all detector scripts to indicate
## an intelligence-based detection.
Detection,
## String data needs to be further categorized since it could represent
## any number of types of data.
type StrType: enum {
## A complete URL without the prefix "http://".
URL,
## User-Agent string, typically from HTTP or a mail message body.
USER_AGENT,
## Email address.
EMAIL,
## DNS domain name.
DOMAIN,
## A user name.
USER_NAME,
## File hash which is non-hash type specific. It's up to the user to query
## for any relevant hash types.
FILE_HASH,
## Certificate SHA-1 hash.
CERT_HASH,
};

## Record type used for logging information from the intelligence framework.
## Primarily for problems or oddities with inserting and querying data.
## This is important since the content of the intelligence framework can
## change quite dramatically during runtime and problems may be introduced
## into the data.
type Info: record {
## The current network time.
ts: time &log;
## Represents the severity of the message.
## This value should be one of: "info", "warn", "error"
level: string &log;
## The message.
message: string &log;
};

## Record to represent metadata associated with a single piece of
## intelligence.
## Data about an :bro:type:`Intel::Item`
type MetaData: record {
## A description for the data.
## An arbitrary string value representing the data source. Typically,
## the convention for this field will be the source name and feed name
## separated by a hyphen. For example: "source1-c&c".
source: string;
## A freeform description for the data.
desc: string &optional;
## A URL where more information may be found about the intelligence.
## A URL for more information about the data.
url: string &optional;
## The time at which the data was first declared to be intelligence.
first_seen: time &optional;
## When this data was most recently inserted into the framework.
latest_seen: time &optional;
## Arbitrary text tags for the data.
tags: set[string];
};

## Record to represent a singular piece of intelligence.
## Represents a piece of intelligence.
type Item: record {
## If the data is an IP address, this holds the address.
ip: addr &optional;
## If the data is textual, this holds the text.
str: string &optional;
## If the data is numeric, this holds the number.
num: int &optional;
## The subtype of the data for when either the $str or $num fields are
## given. If one of those fields is given, this field must be present.
subtype: string &optional;
## The IP address if the intelligence is about an IP address.
host: addr &optional;
## The network if the intelligence is about a CIDR block.
net: subnet &optional;
## The string if the intelligence is about a string.
str: string &optional;
## The type of data that is in the string if the $str field is set.
str_type: StrType &optional;

## The next five fields are temporary until a better model for
## attaching metadata to an intelligence item is created.
desc: string &optional;
url: string &optional;
first_seen: time &optional;
latest_seen: time &optional;
tags: set[string];

## These single string tags are throw-away until pybroccoli supports sets.
tag1: string &optional;
tag2: string &optional;
tag3: string &optional;
## Metadata for the item. Typically represents more deeply
## descriptive data for a piece of intelligence.
meta: MetaData;
};

## Record model used for constructing queries against the intelligence
## framework.
type QueryItem: record {
## If an IP address is being queried for, this field should be given.
ip: addr &optional;
## If a string is being queried for, this field should be given.
str: string &optional;
## If numeric data is being queried for, this field should be given.
num: int &optional;
## If either a string or number is being queried for, this field should
## indicate the subtype of the data.
subtype: string &optional;

## A set of tags where if a single metadata record attached to an item
## has any one of the tags defined in this field, it will match.
or_tags: set[string] &optional;
## A set of tags where a single metadata record attached to an item
## must have all of the tags defined in this field.
and_tags: set[string] &optional;

## The predicate can be given when searching for a match. It will
## be tested against every :bro:type:`Intel::MetaData` item associated
## with the data being matched on. If it returns T a single time, the
## matcher will consider that the item has matched. This field can
## be used for constructing arbitrarily complex queries that may not
## be possible with the $or_tags or $and_tags fields.
pred: function(meta: Intel::MetaData): bool &optional;
## Enum to represent where data came from when it was discovered.
## The convention is to prefix the name with "IN_".
type Where: enum {
## A catchall value to represent data of unknown provenance.
IN_ANYWHERE,
};

## Function to insert data into the intelligence framework.
##
## item: The data item.

## The $host field and combination of $str and $str_type fields are mutually
## exclusive. These records *must* represent either an IP address being
## seen or a string being seen.
type Seen: record {
## The IP address if the data seen is an IP address.
host: addr &log &optional;
## The string if the data is about a string.
str: string &log &optional;
## The type of data that is in the string if the $str field is set.
str_type: StrType &log &optional;

## Where the data was discovered.
where: Where &log;

## If the data was discovered within a connection, the
## connection record should go here to give context to the data.
conn: connection &optional;
};

## Record used for the logging framework representing a positive
## hit within the intelligence framework.
type Info: record {
## Timestamp when the data was discovered.
ts: time &log;

## If a connection was associated with this intelligence hit,
## this is the uid for the connection.
uid: string &log &optional;
## If a connection was associated with this intelligence hit,
## this is the conn_id for the connection.
id: conn_id &log &optional;

## Where the data was seen.
seen: Seen &log;
## Sources which supplied data that resulted in this match.
sources: set[string] &log;
};

## Intelligence data manipulation functions.
global insert: function(item: Item);

## Function to declare discovery of a piece of data in order to check
## it against known intelligence for matches.
global seen: function(s: Seen);

## Event to represent a match in the intelligence data from data that was seen.
## On clusters there is no assurance as to where this event will be generated
## so do not assume that arbitrary global state beyond the given data
## will be available.
##
## Returns: T if the data was successfully inserted into the framework,
## otherwise it returns F.
global insert: function(item: Item): bool;

## A wrapper for the :bro:id:`Intel::insert` function. This is primarily
## used as the external API for inserting data into the intelligence
## framework using Broccoli.
global insert_event: event(item: Item);

## Function for matching data within the intelligence framework.
global matcher: function(item: QueryItem): bool;
## This is the primary mechanism where a user will take actions based on data
## within the intelligence framework.
global match: event(s: Seen, items: set[Item]);

global log_intel: event(rec: Info);
}

type MetaDataStore: table[count] of MetaData;
# Internal handler for matches with no metadata available.
global match_no_items: event(s: Seen);

# Internal events for cluster data distribution.
global new_item: event(item: Item);
global updated_item: event(item: Item);

# Optionally store metadata. This is used internally depending on
# if this is a cluster deployment or not.
const have_full_data = T &redef;

# The in-memory data structure for holding intelligence.
type DataStore: record {
ip_data: table[addr] of MetaDataStore;
# The first string is the actual value and the second string is the subtype.
string_data: table[string, string] of MetaDataStore;
int_data: table[int, string] of MetaDataStore;
net_data: table[subnet] of set[MetaData];
string_data: table[string, StrType] of set[MetaData];
};
global data_store: DataStore;
global data_store: DataStore &redef;

event bro_init()
# The in-memory data structure for holding the barest matchable intelligence.
# This is primarily for workers to do the initial quick matches and store
# a minimal amount of data for the full match to happen on the manager.
type MinDataStore: record {
net_data: set[subnet];
string_data: set[string, StrType];
};
global min_data_store: MinDataStore &redef;

event bro_init() &priority=5
{
Log::create_stream(Intel::LOG, [$columns=Info]);
Log::create_stream(LOG, [$columns=Info, $ev=log_intel]);
}

function insert(item: Item): bool
function find(s: Seen): bool
{
local err_msg = "";
if ( (item?$str || item?$num) && ! item?$subtype )
err_msg = "You must provide a subtype to insert_sync or this item doesn't make sense.";

if ( err_msg == "" )
if ( s?$host &&
((have_full_data && s$host in data_store$net_data) ||
(s$host in min_data_store$net_data)))
{
# Create and fill out the meta data item.
local meta: MetaData;
if ( item?$first_seen )
meta$first_seen = item$first_seen;
if ( item?$latest_seen )
meta$latest_seen = item$latest_seen;
if ( item?$tags )
meta$tags = item$tags;
if ( item?$desc )
meta$desc = item$desc;
if ( item?$url )
meta$url = item$url;

# This is hopefully only temporary until pybroccoli supports sets.
if ( item?$tag1 )
add item$tags[item$tag1];
if ( item?$tag2 )
add item$tags[item$tag2];
if ( item?$tag3 )
add item$tags[item$tag3];

if ( item?$ip )
{
if ( item$ip !in data_store$ip_data )
data_store$ip_data[item$ip] = table();
data_store$ip_data[item$ip][|data_store$ip_data[item$ip]|] = meta;
return T;
}
else if ( item?$str )
{
if ( [item$str, item$subtype] !in data_store$string_data )
data_store$string_data[item$str, item$subtype] = table();

data_store$string_data[item$str, item$subtype][|data_store$string_data[item$str, item$subtype]|] = meta;
return T;
}
else if ( item?$num )
{
if ( [item$num, item$subtype] !in data_store$int_data )
data_store$int_data[item$num, item$subtype] = table();
return T;
}
else if ( s?$str && s?$str_type &&
((have_full_data && [s$str, s$str_type] in data_store$string_data) ||
([s$str, s$str_type] in min_data_store$string_data)))
{
return T;
}
else
{
return F;
}
}

data_store$int_data[item$num, item$subtype][|data_store$int_data[item$num, item$subtype]|] = meta;
return T;
function get_items(s: Seen): set[Item]
{
local item: Item;
local return_data: set[Item] = set();

if ( ! have_full_data )
{
# A reporter warning should be generated here because this function
# should never be called from a host that doesn't have the full data.
# TODO: do a reporter warning.
return return_data;
}

if ( s?$host )
{
# See if the host is known about and it has meta values.
if ( s$host in data_store$net_data )
{
for ( m in data_store$net_data[s$host] )
{
# TODO: the lookup should be finding all and not just most specific
# and $host/$net should have the correct value.
item = [$host=s$host, $meta=m];
add return_data[item];
}
}
}
else if ( s?$str && s?$str_type )
{
# See if the string is known about and it has meta values.
if ( [s$str, s$str_type] in data_store$string_data )
{
for ( m in data_store$string_data[s$str, s$str_type] )
{
item = [$str=s$str, $str_type=s$str_type, $meta=m];
add return_data[item];
}
}
}

return return_data;
}

function Intel::seen(s: Seen)
{
if ( find(s) )
{
if ( have_full_data )
{
local items = get_items(s);
event Intel::match(s, items);
}
else
err_msg = "Failed to insert intelligence item for some unknown reason.";
}

if ( err_msg != "" )
Log::write(Intel::LOG, [$ts=network_time(), $level="warn", $message=fmt(err_msg)]);
return F;
}

event insert_event(item: Item)
{
insert(item);
}

function match_item_with_metadata(item: QueryItem, meta: MetaData): bool
{
if ( item?$and_tags )
{
local matched = T;
# Every tag given has to match in a single MetaData entry.
for ( tag in item$and_tags )
{
if ( tag !in meta$tags )
matched = F;
event Intel::match_no_items(s);
}
if ( matched )
}
}

function has_meta(check: MetaData, metas: set[MetaData]): bool
{
local check_hash = md5_hash(check);
for ( m in metas )
{
if ( check_hash == md5_hash(m) )
return T;
}
else if ( item?$or_tags )
{
# For OR tags, only a single tag has to match.
for ( tag in item$or_tags )
{
if ( tag in meta$tags )
return T;
}
}
else if ( item?$pred )
return item$pred(meta);

# This indicates some sort of failure in the query.
# The records must not be equivalent if we made it this far.
return F;
}

function matcher(item: QueryItem): bool

event Intel::match(s: Seen, items: set[Item]) &priority=5
{
local err_msg = "";
if ( ! (item?$ip || item?$str || item?$num) )
err_msg = "You must supply one of the $ip, $str, or $num fields to search on";
else if ( (item?$or_tags || item?$and_tags) && item?$pred )
err_msg = "You can't match with both tags and a predicate.";
else if ( item?$or_tags && item?$and_tags )
err_msg = "You can't match with both OR'd together tags and AND'd together tags";
else if ( (item?$str || item?$num) && ! item?$subtype )
err_msg = "You must provide a subtype to matcher or this item doesn't make sense.";
else if ( item?$str && item?$num )
err_msg = "You must only provide $str or $num, not both.";

local meta: MetaData;
local empty_set: set[string] = set();
local info: Info = [$ts=network_time(), $seen=s, $sources=empty_set];

if ( err_msg == "" )
if ( s?$conn )
{
if ( item?$ip )
{
if ( item$ip in data_store$ip_data )
{
if ( ! item?$and_tags && ! item?$or_tags && ! item?$pred )
return T;

for ( i in data_store$ip_data[item$ip] )
{
meta = data_store$ip_data[item$ip][i];
if ( match_item_with_metadata(item, meta) )
return T;
}
}
}

else if ( item?$str )
{
if ( [item$str, item$subtype] in data_store$string_data )
{
if ( ! item?$and_tags && ! item?$or_tags && ! item?$pred )
return T;

for ( i in data_store$string_data[item$str, item$subtype] )
{
meta = data_store$string_data[item$str, item$subtype][i];
if ( match_item_with_metadata(item, meta) )
return T;
}
}
}

else if ( item?$num )
{
if ( [item$num, item$subtype] in data_store$int_data )
{
if ( ! item?$and_tags && ! item?$or_tags && ! item?$pred )
return T;

for ( i in data_store$int_data[item$num, item$subtype] )
{
meta = data_store$int_data[item$num, item$subtype][i];
if ( match_item_with_metadata(item, meta) )
return T;
}
}
}
else
err_msg = "Failed to query intelligence data for some unknown reason.";
info$uid = s$conn$uid;
info$id = s$conn$id;
}

if ( err_msg != "" )
Log::write(Intel::LOG, [$ts=network_time(), $level="error", $message=fmt(err_msg)]);
return F;

for ( item in items )
add info$sources[item$meta$source];

Log::write(Intel::LOG, info);
}

function insert(item: Item)
{
if ( item?$str && !item?$str_type )
{
event reporter_warning(network_time(), fmt("You must provide a str_type for strings or this item doesn't make sense. Item: %s", item), "");
return;
}

# Create and fill out the meta data item.
local meta = item$meta;
local metas: set[MetaData];

if ( item?$host )
{
local host = mask_addr(item$host, is_v4_addr(item$host) ? 32 : 128);
if ( have_full_data )
{
if ( host !in data_store$net_data )
data_store$net_data[host] = set();

metas = data_store$net_data[host];
}

add min_data_store$net_data[host];
}
else if ( item?$net )
{
if ( have_full_data )
{
if ( item$net !in data_store$net_data )
data_store$net_data[item$net] = set();

metas = data_store$net_data[item$net];
}

add min_data_store$net_data[item$net];
}
else if ( item?$str )
{
if ( have_full_data )
{
if ( [item$str, item$str_type] !in data_store$string_data )
data_store$string_data[item$str, item$str_type] = set();

metas = data_store$string_data[item$str, item$str_type];
}

add min_data_store$string_data[item$str, item$str_type];
}

local updated = F;
if ( have_full_data )
{
for ( m in metas )
{
if ( meta$source == m$source )
{
if ( has_meta(meta, metas) )
{
# It's the same item being inserted again.
return;
}
else
{
# Same source, different metadata means updated item.
updated = T;
}
}
}
add metas[item$meta];
}

if ( updated )
event Intel::updated_item(item);
else
event Intel::new_item(item);
}

@@ -1,3 +1,6 @@
@load ./main
@load ./postprocessors
@load ./writers/ascii
@load ./writers/dataseries
@load ./writers/elasticsearch
@load ./writers/none

@@ -60,6 +60,9 @@ export {
## Default rotation interval. Zero disables rotation.
const default_rotation_interval = 0secs &redef;

## Default alarm summary mail interval. Zero disables alarm summary mails.
const default_mail_alarms_interval = 0secs &redef;

## Default naming format for timestamps embedded into filenames.
## Uses a ``strftime()`` style.
const default_rotation_date_format = "%Y-%m-%d-%H-%M-%S" &redef;

@@ -96,6 +99,12 @@ export {
## file name. Generally, filenames are expected to be given
## without any extensions; writers will add appropriate
## extensions automatically.
##
## If this path is found to conflict with another filter's
## for the same writer type, it is automatically corrected
## by appending "-N", where N is the smallest integer greater
## or equal to 2 that allows the corrected path name to not
## conflict with another filter's.
path: string &optional;

## A function returning the output path for recording entries

@@ -115,7 +124,10 @@ export {
## rec: An instance of the stream's ``columns`` type with its
## fields set to the values to be logged.
##
## Returns: The path to be used for the filter.
## Returns: The path to be used for the filter, which will be subject
## to the same automatic correction rules as the *path*
## field of :bro:type:`Log::Filter` in the case of conflicts
## with other filters trying to use the same writer/path pair.
path_func: function(id: ID, path: string, rec: any): string &optional;

## Subset of column names to record. If not given, all

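A sketch of a custom path function under these rules; it splits the conn log
by whether the responder is a local address (the function and filter names
are illustrative, not part of this commit):

function split_by_responder(id: Log::ID, path: string, rec: Conn::Info): string
	{
	# Suffix the stream's path with "local" or "remote".
	return fmt("%s_%s", path,
	           Site::is_local_addr(rec$id$resp_h) ? "local" : "remote");
	}

event bro_init()
	{
	Log::add_filter(Conn::LOG, [$name="split-conn",
	                            $path_func=split_by_responder]);
	}
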
@@ -138,6 +150,11 @@ export {
## Callback function to trigger for rotated files. If not set, the
## default comes out of :bro:id:`Log::default_rotation_postprocessors`.
postprocessor: function(info: RotationInfo) : bool &optional;

## A key/value table that will be passed on to the writer.
## Interpretation of the values is left to the writer, but
## usually they will be used for configuration purposes.
config: table[string] of string &default=table();
};

## Sentinel value for indicating that a filter was not found when looked up.

@@ -313,6 +330,11 @@ export {
## Log::default_rotation_postprocessor_cmd
## Log::default_rotation_postprocessors
global run_rotation_postprocessor_cmd: function(info: RotationInfo, npath: string) : bool;

## The streams which are currently active and not disabled.
## This table is not meant to be modified by users! Only use it for
## examining which streams are active.
global active_streams: table[ID] of Stream = table();
}

# We keep a script-level copy of all filters so that we can manipulate them.

@@ -327,20 +349,23 @@ function __default_rotation_postprocessor(info: RotationInfo) : bool
{
if ( info$writer in default_rotation_postprocessors )
return default_rotation_postprocessors[info$writer](info);
else
# Return T by default so that postprocessor-less writers don't shut down.
return T;
}

function default_path_func(id: ID, path: string, rec: any) : string
{
# The suggested path value is a previous result of this function
# or a filter path explicitly set by the user, so continue using it.
if ( path != "" )
return path;

local id_str = fmt("%s", id);

local parts = split1(id_str, /::/);
if ( |parts| == 2 )
{
# The suggested path value is a previous result of this function
# or a filter path explicitly set by the user, so continue using it.
if ( path != "" )
return path;

# Example: Notice::LOG -> "notice"
if ( parts[2] == "LOG" )
{

@@ -356,11 +381,11 @@ function default_path_func(id: ID, path: string, rec: any) : string
output = cat(output, sub_bytes(module_parts[4],1,1), "_", sub_bytes(module_parts[4], 2, |module_parts[4]|));
return to_lower(output);
}

# Example: Notice::POLICY_LOG -> "notice_policy"
if ( /_LOG$/ in parts[2] )
parts[2] = sub(parts[2], /_LOG$/, "");

return cat(to_lower(parts[1]),"_",to_lower(parts[2]));
}
else

@@ -376,13 +401,16 @@ function run_rotation_postprocessor_cmd(info: RotationInfo, npath: string) : bool
if ( pp_cmd == "" )
return T;

# Turn, e.g., Log::WRITER_ASCII into "ascii".
local writer = subst_string(to_lower(fmt("%s", info$writer)), "log::writer_", "");

# The date format is hard-coded here to provide a standardized
# script interface.
system(fmt("%s %s %s %s %s %d",
system(fmt("%s %s %s %s %s %d %s",
pp_cmd, npath, info$path,
strftime("%y-%m-%d_%H.%M.%S", info$open),
strftime("%y-%m-%d_%H.%M.%S", info$close),
info$terminating));
info$terminating, writer));

return T;
}

@@ -392,11 +420,15 @@ function create_stream(id: ID, stream: Stream) : bool
if ( ! __create_stream(id, stream) )
return F;

active_streams[id] = stream;

return add_default_filter(id);
}

function disable_stream(id: ID) : bool
{
delete active_streams[id];

return __disable_stream(id);
}

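Illustrative use of the new Log::active_streams table (a hypothetical
snippet, not part of this commit):

event bro_done()
	{
	for ( id in Log::active_streams )
		print fmt("stream %s was still active at shutdown", id);
	}
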
@@ -407,7 +439,7 @@ function add_filter(id: ID, filter: Filter) : bool
# definition.
if ( ! filter?$path_func )
filter$path_func = default_path_func;

filters[id, filter$name] = filter;
return __add_filter(id, filter);
}

@@ -8,12 +8,13 @@ export {
## into files. This is primarily for debugging purposes.
const output_to_stdout = F &redef;

## If true, include a header line with column names and description
## of the other ASCII logging options that were used.
const include_header = T &redef;
## If true, include lines with log meta information such as column names with
## types, the values of ASCII logging options that are in use, and the time when
## the file was opened and closed (the latter at the end).
const include_meta = T &redef;

## Prefix for the header line if included.
const header_prefix = "#" &redef;
## Prefix for lines with meta information.
const meta_prefix = "#" &redef;

## Separator between fields.
const separator = "\t" &redef;

scripts/base/frameworks/logging/writers/dataseries.bro (new file, 60 lines)
@@ -0,0 +1,60 @@
##! Interface for the DataSeries log writer.

module LogDataSeries;

export {
	## Compression to use with the DS output file. Options are:
	##
	## 'none' -- No compression.
	## 'lzf' -- LZF compression. Very quick, but leads to larger output files.
	## 'lzo' -- LZO compression. Very fast decompression times.
	## 'gz' -- GZIP compression. Slower than LZF, but also produces smaller output.
	## 'bz2' -- BZIP2 compression. Slower than GZIP, but also produces smaller output.
	const compression = "gz" &redef;

	## The extent buffer size.
	## Larger values here lead to better compression and more efficient writes, but
	## also increase the lag between the time events are received and the time they
	## are actually written to disk.
	const extent_size = 65536 &redef;

	## Should we dump the XML schema we use for this DS file to disk?
	## If yes, the XML schema shares the name of the logfile, but has
	## an XML ending.
	const dump_schema = F &redef;

	## How many threads should DataSeries spawn to perform compression?
	## Note that this dictates the number of threads per log stream. If
	## you're using a lot of streams, you may want to keep this number
	## relatively small.
	##
	## Default value is 1, which will spawn one thread / stream.
	##
	## Maximum is 128, minimum is 1.
	const num_threads = 1 &redef;

	## Should time be stored as an integer or a double?
	## Storing time as a double leads to possible precision issues and
	## can (significantly) increase the size of the resulting DS log.
	## That said, timestamps stored in double form are consistent
	## with the rest of Bro, including the standard ASCII log. Hence, we
	## use them by default.
	const use_integer_for_time = F &redef;
}

# Default function to postprocess a rotated DataSeries log file. It moves the
# rotated file to a new name that includes a timestamp with the opening time, and
# then runs the writer's default postprocessor command on it.
function default_rotation_postprocessor_func(info: Log::RotationInfo) : bool
	{
	# Move file to name including both opening and closing time.
	local dst = fmt("%s.%s.ds", info$path,
	                strftime(Log::default_rotation_date_format, info$open));

	system(fmt("/bin/mv %s %s", info$fname, dst));

	# Run default postprocessor.
	return Log::run_rotation_postprocessor_cmd(info, dst);
	}

redef Log::default_rotation_postprocessors += { [Log::WRITER_DATASERIES] = default_rotation_postprocessor_func };

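Switching all logs over to DataSeries output could then plausibly be done
with a single redef (assuming the stock Log::default_writer option):

redef Log::default_writer = Log::WRITER_DATASERIES;
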
scripts/base/frameworks/logging/writers/elasticsearch.bro (new file, 48 lines)
@ -0,0 +1,48 @@
|
|||
##! Log writer for sending logs to an ElasticSearch server.
|
||||
##!
|
||||
##! Note: This module is in testing and is not yet considered stable!
|
||||
##!
|
||||
##! There is one known memory issue. If your elasticsearch server is
|
||||
##! running slowly and taking too long to return from bulk insert
|
||||
##! requests, the message queue to the writer thread will continue
|
||||
##! growing larger and larger giving the appearance of a memory leak.
|
||||
|
||||
module LogElasticSearch;
|
||||
|
||||
export {
|
||||
## Name of the ES cluster
|
||||
const cluster_name = "elasticsearch" &redef;
|
||||
|
||||
## ES Server
|
||||
const server_host = "127.0.0.1" &redef;
|
||||
|
||||
## ES Port
|
||||
const server_port = 9200 &redef;
|
||||
|
||||
## Name of the ES index
|
||||
const index_prefix = "bro" &redef;
|
||||
|
||||
## The ES type prefix comes before the name of the related log.
|
||||
## e.g. prefix = "bro\_" would create types of bro_dns, bro_software, etc.
|
||||
const type_prefix = "" &redef;
|
||||
|
||||
## The time before an ElasticSearch transfer will timeout. Note that
|
||||
## the fractional part of the timeout will be ignored. In particular, time
|
||||
## specifications less than a second result in a timeout value of 0, which
|
||||
## means "no timeout."
|
||||
const transfer_timeout = 2secs;
|
||||
|
||||
## The batch size is the number of messages that will be queued up before
|
||||
## they are sent to be bulk indexed.
|
||||
const max_batch_size = 1000 &redef;
|
||||
|
||||
## The maximum amount of wall-clock time that is allowed to pass without
|
||||
## finishing a bulk log send. This represents the maximum delay you
|
||||
## would like to have with your logs before they are sent to ElasticSearch.
|
||||
const max_batch_interval = 1min &redef;
|
||||
|
||||
## The maximum byte size for a buffered JSON string to send to the bulk
|
||||
## insert API.
|
||||
const max_byte_size = 1024 * 1024 &redef;
|
||||
}
|
||||
|
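A deployment shipping logs to a remote ElasticSearch instance would typically just redefine the connection constants above; a minimal sketch with illustrative values:

	redef LogElasticSearch::server_host = "es.example.com";
	redef LogElasticSearch::server_port = 9200;
	redef LogElasticSearch::max_batch_interval = 30secs;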
17  scripts/base/frameworks/logging/writers/none.bro  Normal file
@ -0,0 +1,17 @@
##! Interface for the None log writer. This writer is mainly for debugging.

module LogNone;

export {
	## If true, output debugging output that can be useful for unit
	## testing the logging framework.
	const debug = F &redef;
}

function default_rotation_postprocessor_func(info: Log::RotationInfo) : bool
	{
	return T;
	}

redef Log::default_rotation_postprocessors += { [Log::WRITER_NONE] = default_rotation_postprocessor_func };
@ -101,7 +101,7 @@ event bro_init()
	# This replaces the standard non-pretty-printing filter.
	Log::add_filter(Notice::ALARM_LOG,
	                [$name="alarm-mail", $writer=Log::WRITER_NONE,
	                 $interv=Log::default_mail_alarms_interval,
	                 $postprocessor=pp_postprocessor]);
	}
@ -23,7 +23,10 @@ redef Cluster::worker2manager_events += /Notice::cluster_notice/;
@if ( Cluster::local_node_type() != Cluster::MANAGER )
# The notice policy is completely handled by the manager and shouldn't be
# done by workers or proxies to save time for packet processing.
event bro_init() &priority=11
	{
	Notice::policy = table();
	}

event Notice::begin_suppression(n: Notice::Info)
	{
@ -1,5 +1,5 @@
##! This framework is intended to create an output and filtering path for
##! internal messages/warnings/errors. It should typically be loaded to
##! avoid Bro spewing internal messages to standard error and instead log
##! them to a file in a standard way. Note that this framework deals with
##! the handling of internally-generated reporter messages, for the
@ -13,11 +13,11 @@ export {
	redef enum Log::ID += { LOG };

	## An indicator of reporter message severity.
	type Level: enum {
		## Informational, not needing specific attention.
		INFO,
		## Warning of a potential problem.
		WARNING,
		## A non-fatal error that should be addressed, but doesn't
		## terminate program execution.
		ERROR
@ -36,24 +36,55 @@ export {
	## Not all reporter messages will have locations in them though.
	location: string &log &optional;
	};

	## Tunable for sending reporter warning messages to STDERR. The option to
	## turn it off is presented here in case Bro is being run by some
	## external harness and shouldn't output anything to the console.
	const warnings_to_stderr = T &redef;

	## Tunable for sending reporter error messages to STDERR. The option to
	## turn it off is presented here in case Bro is being run by some
	## external harness and shouldn't output anything to the console.
	const errors_to_stderr = T &redef;
}

global stderr: file;

event bro_init() &priority=5
	{
	Log::create_stream(Reporter::LOG, [$columns=Info]);

	if ( errors_to_stderr || warnings_to_stderr )
		stderr = open("/dev/stderr");
	}

event reporter_info(t: time, msg: string, location: string) &priority=-5
	{
	Log::write(Reporter::LOG, [$ts=t, $level=INFO, $message=msg, $location=location]);
	}

event reporter_warning(t: time, msg: string, location: string) &priority=-5
	{
	if ( warnings_to_stderr )
		{
		if ( t > double_to_time(0.0) )
			print stderr, fmt("WARNING: %.6f %s (%s)", t, msg, location);
		else
			print stderr, fmt("WARNING: %s (%s)", msg, location);
		}

	Log::write(Reporter::LOG, [$ts=t, $level=WARNING, $message=msg, $location=location]);
	}

event reporter_error(t: time, msg: string, location: string) &priority=-5
	{
	if ( errors_to_stderr )
		{
		if ( t > double_to_time(0.0) )
			print stderr, fmt("ERROR: %.6f %s (%s)", t, msg, location);
		else
			print stderr, fmt("ERROR: %s (%s)", msg, location);
		}

	Log::write(Reporter::LOG, [$ts=t, $level=ERROR, $message=msg, $location=location]);
	}
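When Bro runs under an external harness, as the comments above anticipate, the console output can be silenced while reporter.log keeps everything; an illustrative snippet:

	redef Reporter::warnings_to_stderr = F;
	redef Reporter::errors_to_stderr = F;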
1  scripts/base/frameworks/tunnels/__load__.bro  Normal file
@ -0,0 +1 @@
@load ./main

149  scripts/base/frameworks/tunnels/main.bro  Normal file
@ -0,0 +1,149 @@
##! This script handles the tracking/logging of tunnels (e.g. Teredo,
##! AYIYA, or IP-in-IP such as 6to4 where "IP" is either IPv4 or IPv6).
##!
##! For any connection that occurs over a tunnel, information about its
##! encapsulating tunnels is also found in the *tunnel* field of
##! :bro:type:`connection`.

module Tunnel;

export {
	## The tunnel logging stream identifier.
	redef enum Log::ID += { LOG };

	## Types of interesting activity that can occur with a tunnel.
	type Action: enum {
		## A new tunnel (encapsulating "connection") has been seen.
		DISCOVER,
		## A tunnel connection has closed.
		CLOSE,
		## No new connections over a tunnel happened in the amount of
		## time indicated by :bro:see:`Tunnel::expiration_interval`.
		EXPIRE,
	};

	## The record type which contains column fields of the tunnel log.
	type Info: record {
		## Time at which some tunnel activity occurred.
		ts:          time          &log;
		## The unique identifier for the tunnel, which may correspond
		## to a :bro:type:`connection`'s *uid* field for non-IP-in-IP tunnels.
		## This is optional because there could be numerous connections
		## for payload proxies like SOCKS but we should treat it as a single
		## tunnel.
		uid:         string        &log &optional;
		## The tunnel "connection" 4-tuple of endpoint addresses/ports.
		## For an IP tunnel, the ports will be 0.
		id:          conn_id       &log;
		## The type of tunnel.
		tunnel_type: Tunnel::Type  &log;
		## The type of activity that occurred.
		action:      Action        &log;
	};

	## Logs all tunnels in an encapsulation chain with action
	## :bro:see:`Tunnel::DISCOVER` that aren't already in the
	## :bro:id:`Tunnel::active` table and adds them if not.
	global register_all: function(ecv: EncapsulatingConnVector);

	## Logs a single tunnel "connection" with action
	## :bro:see:`Tunnel::DISCOVER` if it's not already in the
	## :bro:id:`Tunnel::active` table and adds it if not.
	global register: function(ec: EncapsulatingConn);

	## Logs a single tunnel "connection" with action
	## :bro:see:`Tunnel::EXPIRE` and removes it from the
	## :bro:id:`Tunnel::active` table.
	##
	## t: A table of tunnels.
	##
	## idx: The index of the tunnel table corresponding to the tunnel to expire.
	##
	## Returns: 0secs, which when this function is used as an
	##          :bro:attr:`&expire_func`, indicates to remove the element at
	##          *idx* immediately.
	global expire: function(t: table[conn_id] of Info, idx: conn_id): interval;

	## Removes a single tunnel from the :bro:id:`Tunnel::active` table
	## and logs the closing/expiration of the tunnel.
	##
	## tunnel: The tunnel which has closed or expired.
	##
	## action: The specific reason for the tunnel ending.
	global close: function(tunnel: Info, action: Action);

	## The amount of time a tunnel is not used in establishment of new
	## connections before it is considered inactive/expired.
	const expiration_interval = 1hrs &redef;

	## Currently active tunnels. That is, tunnels for which new, encapsulated
	## connections have been seen in the interval indicated by
	## :bro:see:`Tunnel::expiration_interval`.
	global active: table[conn_id] of Info = table() &read_expire=expiration_interval &expire_func=expire;
}

const ayiya_ports = { 5072/udp };
redef dpd_config += { [ANALYZER_AYIYA] = [$ports = ayiya_ports] };

const teredo_ports = { 3544/udp };
redef dpd_config += { [ANALYZER_TEREDO] = [$ports = teredo_ports] };

redef likely_server_ports += { ayiya_ports, teredo_ports };

event bro_init() &priority=5
	{
	Log::create_stream(Tunnel::LOG, [$columns=Info]);
	}

function register_all(ecv: EncapsulatingConnVector)
	{
	for ( i in ecv )
		register(ecv[i]);
	}

function register(ec: EncapsulatingConn)
	{
	if ( ec$cid !in active )
		{
		local tunnel: Info;
		tunnel$ts = network_time();
		if ( ec?$uid )
			tunnel$uid = ec$uid;
		tunnel$id = ec$cid;
		tunnel$action = DISCOVER;
		tunnel$tunnel_type = ec$tunnel_type;
		active[ec$cid] = tunnel;
		Log::write(LOG, tunnel);
		}
	}

function close(tunnel: Info, action: Action)
	{
	tunnel$action = action;
	tunnel$ts = network_time();
	Log::write(LOG, tunnel);
	delete active[tunnel$id];
	}

function expire(t: table[conn_id] of Info, idx: conn_id): interval
	{
	close(t[idx], EXPIRE);
	return 0secs;
	}

event new_connection(c: connection) &priority=5
	{
	if ( c?$tunnel )
		register_all(c$tunnel);
	}

event tunnel_changed(c: connection, e: EncapsulatingConnVector) &priority=5
	{
	register_all(e);
	}

event connection_state_remove(c: connection) &priority=-5
	{
	if ( c$id in active )
		close(active[c$id], CLOSE);
	}
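A minimal usage sketch on top of this framework: shorten the expiration window and report how many tunnels were still active at shutdown (the values are illustrative):

	redef Tunnel::expiration_interval = 10mins;

	event bro_done()
		{
		print fmt("%d tunnels still active at exit", |Tunnel::active|);
		}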
@ -115,6 +115,61 @@ type icmp_context: record {
	DF: bool;	##< True if the packet's *don't fragment* flag is set.
};

## Values extracted from a Prefix Information option in an ICMPv6 neighbor
## discovery message as specified by :rfc:`4861`.
##
## .. bro:see:: icmp6_nd_option
type icmp6_nd_prefix_info: record {
	## Number of leading bits of the *prefix* that are valid.
	prefix_len: count;
	## Flag indicating the prefix can be used for on-link determination.
	L_flag: bool;
	## Autonomous address-configuration flag.
	A_flag: bool;
	## Length of time in seconds that the prefix is valid for purpose of
	## on-link determination (0xffffffff represents infinity).
	valid_lifetime: interval;
	## Length of time in seconds that the addresses generated from the prefix
	## via stateless address autoconfiguration remain preferred
	## (0xffffffff represents infinity).
	preferred_lifetime: interval;
	## An IP address or prefix of an IP address. Use the *prefix_len* field
	## to convert this into a :bro:type:`subnet`.
	prefix: addr;
};

## Options extracted from ICMPv6 neighbor discovery messages as specified
## by :rfc:`4861`.
##
## .. bro:see:: icmp_router_solicitation icmp_router_advertisement
##    icmp_neighbor_advertisement icmp_neighbor_solicitation icmp_redirect
##    icmp6_nd_options
type icmp6_nd_option: record {
	## 8-bit identifier of the type of option.
	otype: count;
	## 8-bit integer representing the length of the option (including the type
	## and length fields) in units of 8 octets.
	len: count;
	## Source Link-Layer Address (Type 1) or Target Link-Layer Address (Type 2).
	## Byte ordering of this is dependent on the actual link-layer.
	link_address: string &optional;
	## Prefix Information (Type 3).
	prefix: icmp6_nd_prefix_info &optional;
	## Redirected header (Type 4). This field contains the context of the
	## original, redirected packet.
	redirect: icmp_context &optional;
	## Recommended MTU for the link (Type 5).
	mtu: count &optional;
	## The raw data of the option (everything after the type & length fields),
	## useful for unknown option types or when the full option payload is
	## truncated in the captured packet. In those cases, option fields
	## won't be pre-extracted into the fields above.
	payload: string &optional;
};

## A type alias for a vector of ICMPv6 neighbor discovery message options.
type icmp6_nd_options: vector of icmp6_nd_option;
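As the *prefix* field's documentation suggests, a prefix plus *prefix_len* can be turned into a :bro:type:`subnet`, presumably with the built-in ``mask_addr`` function; a minimal sketch:

	function nd_prefix_to_subnet(pi: icmp6_nd_prefix_info): subnet
		{
		# Keep only the leading prefix_len bits of the advertised address.
		return mask_addr(pi$prefix, pi$prefix_len);
		}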
# A DNS mapping between IP address and hostname resolved by Bro's internal
# resolver.
#
@ -178,9 +233,35 @@ type endpoint_stats: record {
##    use ``count``. That should be changed.
type AnalyzerID: count;

module Tunnel;
export {
	## Records the identity of an encapsulating parent of a tunneled connection.
	type EncapsulatingConn: record {
		## The 4-tuple of the encapsulating "connection". In case of an IP-in-IP
		## tunnel the ports will be set to 0. The direction (i.e., orig and
		## resp) are set according to the first tunneled packet seen
		## and not according to the side that established the tunnel.
		cid: conn_id;
		## The type of tunnel.
		tunnel_type: Tunnel::Type;
		## A globally unique identifier that, for non-IP-in-IP tunnels,
		## cross-references the *uid* field of :bro:type:`connection`.
		uid: string &optional;
	} &log;
} # end export
module GLOBAL;

## A type alias for a vector of encapsulating "connections", i.e. for when
## there are tunnels within tunnels.
##
## .. todo:: We need this type definition only for declaring builtin functions
##    via ``bifcl``. We should extend ``bifcl`` to understand composite types
##    directly and then remove this alias.
type EncapsulatingConnVector: vector of Tunnel::EncapsulatingConn;

## Statistics about a :bro:type:`connection` endpoint.
##
## .. bro:see:: connection
type endpoint: record {
	size: count;	##< Logical size of data sent (for TCP: derived from sequence numbers).
	## Endpoint state. For a TCP connection, one of the constants:
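Policy scripts receive values of these types via the *tunnel* field of :bro:type:`connection` (documented further below); a minimal sketch that reports the outermost encapsulation when a connection ends:

	event connection_state_remove(c: connection)
		{
		if ( c?$tunnel )
			# Index 0 is the outermost encapsulating "connection".
			print fmt("%s was tunneled via %s", c$uid, c$tunnel[0]$tunnel_type);
		}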
@ -194,12 +275,15 @@ type endpoint: record {
	## Number of IP-level bytes sent. Only set if :bro:id:`use_conn_size_analyzer` is
	## true.
	num_bytes_ip: count &optional;
	## The current IPv6 flow label that the connection endpoint is using.
	## Always 0 if the connection is over IPv4.
	flow_label: count;
};

## A connection. This is Bro's basic connection type describing IP- and
## transport-layer information about the conversation. Note that Bro uses a
## liberal interpretation of "connection" and associates instances of this type
## also with UDP and ICMP flows.
type connection: record {
	id: conn_id;	##< The connection's identifying 4-tuple.
	orig: endpoint;	##< Statistics about originator side.

@ -219,11 +303,17 @@ type connection: record {
	service: set[string];
	addl: string;	##< Deprecated.
	hot: count;	##< Deprecated.
	history: string;	##< State history of connections. See *history* in :bro:see:`Conn::Info`.
	## A globally unique connection identifier. For each connection, Bro creates an ID
	## that is very likely unique across independent Bro runs. These IDs can thus be
	## used to tag and locate information associated with that connection.
	uid: string;
	## If the connection is tunneled, this field contains information about
	## the encapsulating "connection(s)", with the outermost one starting
	## at index zero. It's also always the first such encapsulation seen
	## for the connection unless the :bro:id:`tunnel_changed` event is handled
	## and re-assigns this field to the new encapsulation.
	tunnel: EncapsulatingConnVector &optional;
};

## Fields of a SYN packet.
@ -612,7 +702,9 @@ function add_signature_file(sold: string, snew: string): string
	}

## Signature files to read. Use ``redef signature_files += "foo.sig"`` to
## extend. Signature files added this way will be searched relative to
## ``BROPATH``. Using the ``@load-sigs`` directive instead is preferred
## since that can search paths relative to the current script.
global signature_files = "" &add_func = add_signature_file;

## ``p0f`` fingerprint file to use. Will be searched relative to ``BROPATH``.
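The preferred directive looks like this in a script that ships a signature file alongside itself (the HTTP scripts later in this commit do exactly that):

	@load-sigs ./file-ident.sig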
@ -721,7 +813,7 @@ const tcp_storm_interarrival_thresh = 1 sec &redef;
## peer's ACKs. Set to zero to turn off this determination.
##
## .. bro:see:: tcp_max_above_hole_without_any_acks tcp_excessive_data_without_further_acks
const tcp_max_initial_window = 4096 &redef;

## If we're not seeing our peer's ACKs, the maximum volume of data above a sequence
## hole that we'll tolerate before assuming that there's been a packet drop and we

@ -729,7 +821,7 @@ const tcp_max_initial_window = 4096;
## up.
##
## .. bro:see:: tcp_max_initial_window tcp_excessive_data_without_further_acks
const tcp_max_above_hole_without_any_acks = 4096 &redef;

## If we've seen this much data without any of it being acked, we give up
## on that connection to avoid memory exhaustion due to buffering all that

@ -738,7 +830,7 @@ const tcp_max_above_hole_without_any_acks = 4096;
## has in fact gone too far, but for now we just make this quite beefy.
##
## .. bro:see:: tcp_max_initial_window tcp_max_above_hole_without_any_acks
const tcp_excessive_data_without_further_acks = 10 * 1024 * 1024 &redef;

## For services without a handler, these sets define originator-side ports that
## still trigger reassembly.
@ -866,18 +958,9 @@ const frag_timeout = 0.0 sec &redef;
const packet_sort_window = 0 usecs &redef;

## If positive, indicates the encapsulation header size that should
## be skipped. This applies to all packets.
const encap_hdr_size = 0 &redef;

## Whether to use the ``ConnSize`` analyzer to count the number of packets and
## IP-level bytes transferred by each endpoint. If true, these values are returned
## in the connection's :bro:see:`endpoint` record value.
@ -954,16 +1037,19 @@ const IPPROTO_MOBILITY = 135;	##< IPv6 mobility header.
## Values extracted from an IPv6 extension header's (e.g. hop-by-hop or
## destination option headers) option field.
##
## .. bro:see:: ip6_hdr ip6_ext_hdr ip6_hopopts ip6_dstopts
type ip6_option: record {
	otype: count;	##< Option type.
	len: count;	##< Option data length.
	data: string;	##< Option data.
};

## A type alias for a vector of IPv6 options.
type ip6_options: vector of ip6_option;

## Values extracted from an IPv6 Hop-by-Hop options extension header.
##
## .. bro:see:: pkt_hdr ip4_hdr ip6_hdr ip6_ext_hdr ip6_option
type ip6_hopopts: record {
	## Protocol number of the next header (RFC 1700 et seq., IANA assigned
	## number), e.g. :bro:id:`IPPROTO_ICMP`.

@ -971,12 +1057,12 @@ type ip6_hopopts: record {
	## Length of header in 8-octet units, excluding first unit.
	len: count;
	## The TLV encoded options.
	options: ip6_options;
};

## Values extracted from an IPv6 Destination options extension header.
##
## .. bro:see:: pkt_hdr ip4_hdr ip6_hdr ip6_ext_hdr ip6_option
type ip6_dstopts: record {
	## Protocol number of the next header (RFC 1700 et seq., IANA assigned
	## number), e.g. :bro:id:`IPPROTO_ICMP`.

@ -984,12 +1070,12 @@ type ip6_dstopts: record {
	## Length of header in 8-octet units, excluding first unit.
	len: count;
	## The TLV encoded options.
	options: ip6_options;
};

## Values extracted from an IPv6 Routing extension header.
##
## .. bro:see:: pkt_hdr ip4_hdr ip6_hdr ip6_ext_hdr
type ip6_routing: record {
	## Protocol number of the next header (RFC 1700 et seq., IANA assigned
	## number), e.g. :bro:id:`IPPROTO_ICMP`.
@ -1006,7 +1092,7 @@ type ip6_routing: record {

## Values extracted from an IPv6 Fragment extension header.
##
## .. bro:see:: pkt_hdr ip4_hdr ip6_hdr ip6_ext_hdr
type ip6_fragment: record {
	## Protocol number of the next header (RFC 1700 et seq., IANA assigned
	## number), e.g. :bro:id:`IPPROTO_ICMP`.

@ -1025,7 +1111,7 @@ type ip6_fragment: record {

## Values extracted from an IPv6 Authentication extension header.
##
## .. bro:see:: pkt_hdr ip4_hdr ip6_hdr ip6_ext_hdr
type ip6_ah: record {
	## Protocol number of the next header (RFC 1700 et seq., IANA assigned
	## number), e.g. :bro:id:`IPPROTO_ICMP`.

@ -1036,15 +1122,15 @@ type ip6_ah: record {
	rsv: count;
	## Security Parameter Index.
	spi: count;
	## Sequence number, unset in the case that the *len* field is zero.
	seq: count &optional;
	## Authentication data, unset in the case that the *len* field is zero.
	data: string &optional;
};

## Values extracted from an IPv6 ESP extension header.
##
## .. bro:see:: pkt_hdr ip4_hdr ip6_hdr ip6_ext_hdr
type ip6_esp: record {
	## Security Parameters Index.
	spi: count;

@ -1054,7 +1140,7 @@ type ip6_esp: record {

## Values extracted from an IPv6 Mobility Binding Refresh Request message.
##
## .. bro:see:: ip6_mobility_hdr ip6_hdr ip6_ext_hdr ip6_mobility_msg
type ip6_mobility_brr: record {
	## Reserved.
	rsv: count;

@ -1064,7 +1150,7 @@ type ip6_mobility_brr: record {

## Values extracted from an IPv6 Mobility Home Test Init message.
##
## .. bro:see:: ip6_mobility_hdr ip6_hdr ip6_ext_hdr ip6_mobility_msg
type ip6_mobility_hoti: record {
	## Reserved.
	rsv: count;

@ -1076,7 +1162,7 @@ type ip6_mobility_hoti: record {

## Values extracted from an IPv6 Mobility Care-of Test Init message.
##
## .. bro:see:: ip6_mobility_hdr ip6_hdr ip6_ext_hdr ip6_mobility_msg
type ip6_mobility_coti: record {
	## Reserved.
	rsv: count;

@ -1088,7 +1174,7 @@ type ip6_mobility_coti: record {

## Values extracted from an IPv6 Mobility Home Test message.
##
## .. bro:see:: ip6_mobility_hdr ip6_hdr ip6_ext_hdr ip6_mobility_msg
type ip6_mobility_hot: record {
	## Home Nonce Index.
	nonce_idx: count;

@ -1102,7 +1188,7 @@ type ip6_mobility_hot: record {

## Values extracted from an IPv6 Mobility Care-of Test message.
##
## .. bro:see:: ip6_mobility_hdr ip6_hdr ip6_ext_hdr ip6_mobility_msg
type ip6_mobility_cot: record {
	## Care-of Nonce Index.
	nonce_idx: count;

@ -1116,7 +1202,7 @@ type ip6_mobility_cot: record {

## Values extracted from an IPv6 Mobility Binding Update message.
##
## .. bro:see:: ip6_mobility_hdr ip6_hdr ip6_ext_hdr ip6_mobility_msg
type ip6_mobility_bu: record {
	## Sequence number.
	seq: count;

@ -1136,7 +1222,7 @@ type ip6_mobility_bu: record {

## Values extracted from an IPv6 Mobility Binding Acknowledgement message.
##
## .. bro:see:: ip6_mobility_hdr ip6_hdr ip6_ext_hdr ip6_mobility_msg
type ip6_mobility_back: record {
	## Status.
	status: count;

@ -1152,7 +1238,7 @@ type ip6_mobility_back: record {

## Values extracted from an IPv6 Mobility Binding Error message.
##
## .. bro:see:: ip6_mobility_hdr ip6_hdr ip6_ext_hdr ip6_mobility_msg
type ip6_mobility_be: record {
	## Status.
	status: count;

@ -1164,7 +1250,7 @@ type ip6_mobility_be: record {

## Values extracted from an IPv6 Mobility header's message data.
##
## .. bro:see:: ip6_mobility_hdr ip6_hdr ip6_ext_hdr
type ip6_mobility_msg: record {
	## The type of message from the header's MH Type field.
	id: count;

@ -1188,7 +1274,7 @@ type ip6_mobility_msg: record {

## Values extracted from an IPv6 Mobility header.
##
## .. bro:see:: pkt_hdr ip4_hdr ip6_hdr ip6_ext_hdr
type ip6_mobility_hdr: record {
	## Protocol number of the next header (RFC 1700 et seq., IANA assigned
	## number), e.g. :bro:id:`IPPROTO_ICMP`.

@ -1229,9 +1315,12 @@ type ip6_ext_hdr: record {
	mobility: ip6_mobility_hdr &optional;
};

## A type alias for a vector of IPv6 extension headers.
type ip6_ext_hdr_chain: vector of ip6_ext_hdr;

## Values extracted from an IPv6 header.
##
## .. bro:see:: pkt_hdr ip4_hdr ip6_ext_hdr ip6_hopopts ip6_dstopts
##    ip6_routing ip6_fragment ip6_ah ip6_esp
type ip6_hdr: record {
	class: count;	##< Traffic class.

@ -1243,7 +1332,7 @@ type ip6_hdr: record {
	hlim: count;	##< Hop limit.
	src: addr;	##< Source address.
	dst: addr;	##< Destination address.
	exts: ip6_ext_hdr_chain;	##< Extension header chain.
};

## Values extracted from an IPv4 header.
@ -1312,6 +1401,42 @@ type pkt_hdr: record {
	icmp: icmp_hdr &optional;	##< The ICMP header if an ICMP packet.
};

## A Teredo authentication header. See :rfc:`4380` for more information
## about the Teredo protocol.
##
## .. bro:see:: teredo_bubble teredo_origin_indication teredo_authentication
##    teredo_hdr
type teredo_auth: record {
	id: string;	##< Teredo client identifier.
	value: string;	##< HMAC-SHA1 over shared secret key between client and
			##< server, nonce, confirmation byte, origin indication
			##< (if present), and the IPv6 packet.
	nonce: count;	##< Nonce chosen by Teredo client to be repeated by
			##< Teredo server.
	confirm: count;	##< Confirmation byte to be set to 0 by Teredo client
			##< and non-zero by server if client needs new key.
};

## A Teredo origin indication header. See :rfc:`4380` for more information
## about the Teredo protocol.
##
## .. bro:see:: teredo_bubble teredo_origin_indication teredo_authentication
##    teredo_hdr
type teredo_origin: record {
	p: port;	##< Unobfuscated UDP port of Teredo client.
	a: addr;	##< Unobfuscated IPv4 address of Teredo client.
};

## A Teredo packet header. See :rfc:`4380` for more information about the
## Teredo protocol.
##
## .. bro:see:: teredo_bubble teredo_origin_indication teredo_authentication
type teredo_hdr: record {
	auth: teredo_auth &optional;	##< Teredo authentication header.
	origin: teredo_origin &optional;	##< Teredo origin indication header.
	hdr: pkt_hdr;	##< IPv6 and transport protocol headers.
};
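These records arrive as the ``inner`` argument of the Teredo events; a minimal sketch, assuming a ``teredo_packet`` event with the signature shown here:

	event teredo_packet(outer: connection, inner: teredo_hdr)
		{
		if ( inner?$origin )
			print fmt("Teredo client behind %s:%s", inner$origin$a, inner$origin$p);
		}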
## Definition of "secondary filters". A secondary filter is a BPF filter given as
## an index in this table. For each such filter, the corresponding event is raised
## for all matching packets.
@ -1789,6 +1914,14 @@ export {
	};
} # end export

module Threading;

export {
	## The heartbeat interval used by the threading framework.
	## Changing this should usually not be necessary and will break several tests.
	const heartbeat_interval = 1.0 secs &redef;
}

module GLOBAL;

## An NTP message.
@ -2311,11 +2444,37 @@ type bittorrent_benc_dir: table[string] of bittorrent_benc_value;
##    bt_tracker_response_not_ok
type bt_tracker_headers: table[string] of string;

type ModbusCoils: vector of bool;
type ModbusRegisters: vector of count;

type ModbusHeaders: record {
	tid: count;
	pid: count;
	uid: count;
	function_code: count;
};

module SOCKS;
export {
	## This record is for a SOCKS client or server to provide either a
	## name or an address to represent a desired or established connection.
	type Address: record {
		host: addr &optional;
		name: string &optional;
	} &log;
}
module GLOBAL;

@load base/event.bif

## BPF filter the user has set via the -f command line option. Empty if none.
const cmd_line_bpf_filter = "" &redef;

## The maximum number of open files to keep cached at a given time.
## If set to zero, this is automatically determined by inspecting
## the current/maximum limit on open files for the process.
const max_files_in_cache = 0 &redef;

## Deprecated.
const log_rotate_interval = 0 sec &redef;
@ -2599,11 +2758,41 @@ const record_all_packets = F &redef;
## .. bro:see:: conn_stats
const ignore_keep_alive_rexmit = F &redef;

module Tunnel;
export {
	## The maximum depth of a tunnel to decapsulate until giving up.
	## Setting this to zero will disable all types of tunnel decapsulation.
	const max_depth: count = 2 &redef;

	## Toggle whether to do IPv{4,6}-in-IPv{4,6} decapsulation.
	const enable_ip = T &redef;

	## Toggle whether to do IPv{4,6}-in-AYIYA decapsulation.
	const enable_ayiya = T &redef;

	## Toggle whether to do IPv6-in-Teredo decapsulation.
	const enable_teredo = T &redef;

	## With this option set, the Teredo analysis will first check to see if
	## other protocol analyzers have confirmed that they think they're
	## parsing the right protocol and only continue with Teredo tunnel
	## decapsulation if nothing else has yet confirmed. This can help
	## reduce false positives of UDP traffic (e.g. DNS) that also happens
	## to have a valid Teredo encapsulation.
	const yielding_teredo_decapsulation = T &redef;

	## With this set, the Teredo analyzer waits until it sees both sides
	## of a connection using a valid Teredo encapsulation before issuing
	## a :bro:see:`protocol_confirmation`. If it's false, the first
	## occurrence of a packet with valid Teredo encapsulation causes a
	## confirmation. Both cases are still subject to effects of
	## :bro:see:`Tunnel::yielding_teredo_decapsulation`.
	const delay_teredo_confirmation = T &redef;

	## How often to clean up internal state for inactive IP tunnels.
	const ip_tunnel_timeout = 24hrs &redef;
} # end export
module GLOBAL;
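A site that doesn't want Teredo traffic decapsulated, or that wants to bound the work done on nested tunnels, can flip these toggles from local policy; an illustrative snippet:

	redef Tunnel::enable_teredo = F;
	redef Tunnel::max_depth = 1;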
## Number of bytes per packet to capture from live interfaces.
const snaplen = 8192 &redef;
@ -2611,3 +2800,6 @@ const snaplen = 8192 &redef;
# Load the logging framework here because it uses fairly deep integration with
# BiFs and script-land defined types.
@load base/frameworks/logging

@load base/frameworks/input
@ -14,6 +14,7 @@
@load base/utils/patterns
@load base/utils/strings
@load base/utils/thresholds
@load base/utils/urls

# This has some deep interplay between types and BiFs so it's
# loaded in base/init-bare.bro
@ -30,13 +31,18 @@
@load base/frameworks/intel
@load base/frameworks/reporter
@load base/frameworks/protocols
@load base/frameworks/tunnels

@load base/protocols/conn
@load base/protocols/dns
@load base/protocols/ftp
@load base/protocols/http
@load base/protocols/irc
@load base/protocols/modbus
@load base/protocols/smtp
@load base/protocols/socks
@load base/protocols/ssh
@load base/protocols/ssl
@load base/protocols/syslog

@load base/misc/find-checksum-offloading
57  scripts/base/misc/find-checksum-offloading.bro  Normal file
@ -0,0 +1,57 @@
##! Discover cases where the local interface is sniffed and outbound packets
##! have checksum offloading. Load this script to receive a notice if it's
##! likely that checksum offload effects are being seen on a live interface or
##! in a packet trace file.

@load base/frameworks/notice

module ChecksumOffloading;

export {
	## The interval which is used for checking packet statistics
	## to see if checksum offloading is affecting analysis.
	const check_interval = 10secs &redef;
}

# Keep track of how many bad checksums have been seen.
global bad_checksums = 0;

# Track to see if this script is done so that messages aren't created multiple times.
global done = F;

event ChecksumOffloading::check()
	{
	if ( done )
		return;

	local pkts_recvd = net_stats()$pkts_recvd;
	if ( (bad_checksums*1.0 / pkts_recvd*1.0) > 0.05 )
		{
		local packet_src = reading_traces() ? "trace file likely has" : "interface is likely receiving";
		local message = fmt("Your %s invalid IP checksums, most likely from NIC checksum offloading.", packet_src);
		Reporter::warning(message);
		done = T;
		}
	else if ( pkts_recvd < 20 )
		{
		# Keep scheduling this event until we've seen some minimum number of
		# total packets.
		schedule check_interval { ChecksumOffloading::check() };
		}
	}

event bro_init()
	{
	schedule check_interval { ChecksumOffloading::check() };
	}

event net_weird(name: string)
	{
	if ( name == "bad_IP_checksum" )
		++bad_checksums;
	}

event bro_done()
	{
	event ChecksumOffloading::check();
	}
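If ten seconds is too eager for a given tap, the polling cadence can be widened from local policy; an illustrative one-liner:

	redef ChecksumOffloading::check_interval = 1min;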
@ -1,3 +1,4 @@
@load ./main
@load ./contents
@load ./inactivity
@load ./polling
@ -17,7 +17,7 @@ export {
	type Info: record {
		## This is the time of the first packet.
		ts: time &log;
		## A unique identifier of the connection.
		uid: string &log;
		## The connection's 4-tuple of endpoint addresses/ports.
		id: conn_id &log;
@ -30,7 +30,7 @@
		## tear-downs, this will not include the final ACK.
		duration: interval &log &optional;
		## The number of payload bytes the originator sent. For TCP
		## this is taken from sequence numbers and might be inaccurate
		## (e.g., due to large connections).
		orig_bytes: count &log &optional;
		## The number of payload bytes the responder sent. See ``orig_bytes``.
@ -54,21 +54,21 @@
		## OTH          No SYN seen, just midstream traffic (a "partial connection" that was not later closed).
		## ==========   ===============================================
		conn_state: string &log &optional;

		## If the connection is originated locally, this value will be T. If
		## it was originated remotely it will be F. In the case that the
		## :bro:id:`Site::local_nets` variable is undefined, this field will
		## be left empty at all times.
		local_orig: bool &log &optional;

		## Indicates the number of bytes missed in content gaps, which is
		## representative of packet loss. A value other than zero will
		## normally cause protocol analysis to fail but some analysis may
		## have been completed prior to the packet loss.
		missed_bytes: count &log &default=0;

		## Records the state history of connections as a string of letters.
		## The meaning of those letters is:
		##
		## ======  ====================================================
		## Letter  Meaning
@ -83,24 +83,29 @@
		## i       inconsistent packet (e.g. SYN+RST bits both set)
		## ======  ====================================================
		##
		## If the event comes from the originator, the letter is in upper-case; if it comes
		## from the responder, it's in lower-case. Multiple packets of the same type will
		## only be noted once (e.g. we only record one "d" in each direction, regardless of
		## how many data packets were seen.)
		history: string &log &optional;
		## Number of packets that the originator sent.
		## Only set if :bro:id:`use_conn_size_analyzer` = T
		orig_pkts: count &log &optional;
		## Number of IP level bytes that the originator sent (as seen on the wire,
		## taken from the IP total_length header field).
		## Only set if :bro:id:`use_conn_size_analyzer` = T
		orig_ip_bytes: count &log &optional;
		## Number of packets that the responder sent.
		## Only set if :bro:id:`use_conn_size_analyzer` = T
		resp_pkts: count &log &optional;
		## Number of IP level bytes that the responder sent (as seen on the wire,
		## taken from the IP total_length header field).
		## Only set if :bro:id:`use_conn_size_analyzer` = T
		resp_ip_bytes: count &log &optional;
		## If this connection was over a tunnel, indicate the
		## *uid* values for any encapsulating parent connections
		## used over the lifetime of this inner connection.
		tunnel_parents: set[string] &log;
	};

	## Event that can be handled to access the :bro:type:`Conn::Info`
@ -190,13 +195,15 @@ function set_conn(c: connection, eoc: bool)
	c$conn$ts=c$start_time;
	c$conn$uid=c$uid;
	c$conn$id=c$id;
	if ( c?$tunnel && |c$tunnel| > 0 )
		add c$conn$tunnel_parents[c$tunnel[|c$tunnel|-1]$uid];
	c$conn$proto=get_port_transport_proto(c$id$resp_p);
	if( |Site::local_nets| > 0 )
		c$conn$local_orig=Site::is_local_addr(c$id$orig_h);

	if ( eoc )
		{
		if ( c$duration > 0secs )
			{
			c$conn$duration=c$duration;
			c$conn$orig_bytes=c$orig$size;

@ -212,7 +219,7 @@ function set_conn(c: connection, eoc: bool)
			c$conn$resp_ip_bytes = c$resp$num_bytes_ip;
			}
		local service = determine_service(c);
		if ( service != "" )
			c$conn$service=service;
		c$conn$conn_state=conn_state(c, get_port_transport_proto(c$id$resp_p));
@ -224,10 +231,18 @@ function set_conn(c: connection, eoc: bool)
event content_gap(c: connection, is_orig: bool, seq: count, length: count) &priority=5
	{
	set_conn(c, F);

	c$conn$missed_bytes = c$conn$missed_bytes + length;
	}

event tunnel_changed(c: connection, e: EncapsulatingConnVector) &priority=5
	{
	set_conn(c, F);
	if ( |e| > 0 )
		add c$conn$tunnel_parents[e[|e|-1]$uid];
	c$tunnel = e;
	}

event connection_state_remove(c: connection) &priority=5
	{
	set_conn(c, T);
49  scripts/base/protocols/conn/polling.bro  Normal file
@ -0,0 +1,49 @@
##! Implements a generic way to poll connections looking for certain features
##! (e.g. monitor bytes transferred). The specific feature of a connection
##! to look for, the polling interval, and the code to execute if the feature
##! is found are all controlled by user-defined callback functions.

module ConnPolling;

export {
	## Starts monitoring a given connection.
	##
	## c: The connection to watch.
	##
	## callback: A callback function that takes as arguments the monitored
	##           *connection*, and a counter *cnt* that increments each time the
	##           callback is called. It returns an interval indicating how long
	##           in the future to schedule an event which will call the
	##           callback. A negative return interval causes polling to stop.
	##
	## cnt: The initial value of a counter which gets passed to *callback*.
	##
	## i: The initial interval at which to schedule the next callback.
	##    May be ``0secs`` to poll right away.
	global watch: function(c: connection,
	                       callback: function(c: connection, cnt: count): interval,
	                       cnt: count, i: interval);
}

event ConnPolling::check(c: connection,
                         callback: function(c: connection, cnt: count): interval,
                         cnt: count)
	{
	if ( ! connection_exists(c$id) )
		return;

	lookup_connection(c$id);	# updates the conn val

	local next_interval = callback(c, cnt);
	if ( next_interval < 0secs )
		return;

	watch(c, callback, cnt + 1, next_interval);
	}

function watch(c: connection,
               callback: function(c: connection, cnt: count): interval,
               cnt: count, i: interval)
	{
	schedule i { ConnPolling::check(c, callback, cnt) };
	}
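As a usage sketch, a hypothetical policy could watch every new connection and stop polling once it crosses a byte threshold (the callback name and thresholds here are illustrative; the GridFTP script later in this commit uses the same pattern):

	function big_conn_callback(c: connection, cnt: count): interval
		{
		if ( c$orig$size + c$resp$size > 52428800 )	# over 50 MB seen so far
			{
			print fmt("big connection: %s", c$uid);
			return -1sec;	# a negative interval stops the polling
			}
		return 30secs;	# otherwise check again in 30 seconds
		}

	event new_connection(c: connection)
		{
		ConnPolling::watch(c, big_conn_callback, 0, 1min);
		}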
@ -46,27 +46,29 @@ export {
		AA: bool &log &default=F;
		## The Truncation bit specifies that the message was truncated.
		TC: bool &log &default=F;
		## The Recursion Desired bit in a request message indicates that
		## the client wants recursive service for this query.
		RD: bool &log &default=F;
		## The Recursion Available bit in a response message indicates that
		## the name server supports recursive queries.
		RA: bool &log &default=F;
		## A reserved field that is currently supposed to be zero in all
		## queries and responses.
		Z: count &log &default=0;
		## The set of resource descriptions in the query answer.
		answers: vector of string &log &optional;
		## The caching intervals of the associated RRs described by the
		## ``answers`` field.
		TTLs: vector of interval &log &optional;
		## The DNS query was rejected by the server.
		rejected: bool &log &default=F;

		## This value indicates if this request/response pair is ready to be
		## logged.
		ready: bool &default=F;
		## The total number of resource records in a reply message's answer
		## section.
		total_answers: count &default=0;
		## The total number of resource records in a reply message's answer,
		## authority, and additional sections.
		total_replies: count &optional;
@ -151,11 +153,11 @@ function set_session(c: connection, msg: dns_msg, is_query: bool)

	c$dns = c$dns_state$pending[msg$id];

	if ( ! is_query )
		{
		c$dns$rcode = msg$rcode;
		c$dns$rcode_name = base_errors[msg$rcode];

		if ( ! c$dns?$total_answers )
			c$dns$total_answers = msg$num_answers;
|
|||
}
|
||||
}
|
||||
|
||||
event dns_message(c: connection, is_orig: bool, msg: dns_msg, len: count) &priority=5
|
||||
{
|
||||
set_session(c, msg, is_orig);
|
||||
}
|
||||
|
||||
event DNS::do_reply(c: connection, msg: dns_msg, ans: dns_answer, reply: string) &priority=5
|
||||
{
|
||||
set_session(c, msg, F);
|
||||
|
||||
if ( ans$answer_type == DNS_ANS )
|
||||
{
|
||||
c$dns$AA = msg$AA;
|
||||
|
@ -198,7 +203,8 @@ event DNS::do_reply(c: connection, msg: dns_msg, ans: dns_answer, reply: string)
|
|||
c$dns$TTLs[|c$dns$TTLs|] = ans$TTL;
|
||||
}
|
||||
|
||||
if ( c$dns?$answers && |c$dns$answers| == c$dns$total_answers )
|
||||
if ( c$dns?$answers && c$dns?$total_answers &&
|
||||
|c$dns$answers| == c$dns$total_answers )
|
||||
{
|
||||
add c$dns_state$finished_answers[c$dns$trans_id];
|
||||
# Indicate this request/reply pair is ready to be logged.
|
||||
|
@ -219,8 +225,6 @@ event DNS::do_reply(c: connection, msg: dns_msg, ans: dns_answer, reply: string)
|
|||
|
||||
event dns_request(c: connection, msg: dns_msg, query: string, qtype: count, qclass: count) &priority=5
|
||||
{
|
||||
set_session(c, msg, T);
|
||||
|
||||
c$dns$RD = msg$RD;
|
||||
c$dns$TC = msg$TC;
|
||||
c$dns$qclass = qclass;
|
||||
|
@ -310,11 +314,9 @@ event dns_SRV_reply(c: connection, msg: dns_msg, ans: dns_answer) &priority=5
#
#	}

event dns_rejected(c: connection, msg: dns_msg, query: string, qtype: count, qclass: count) &priority=5
	{
	c$dns$rejected = T;
	}

event connection_state_remove(c: connection) &priority=-5
@ -1,3 +1,4 @@
@load ./utils-commands
@load ./main
@load ./file-extract
@load ./gridftp
121  scripts/base/protocols/ftp/gridftp.bro  Normal file
@ -0,0 +1,121 @@
##! A detection script for GridFTP data and control channels.
|
||||
##!
|
||||
##! GridFTP control channels are identified by FTP control channels
|
||||
##! that successfully negotiate the GSSAPI method of an AUTH request
|
||||
##! and for which the exchange involved an encoded TLS/SSL handshake,
|
||||
##! indicating the GSI mechanism for GSSAPI was used. This analysis
|
||||
##! is all supported internally, this script simple adds the "gridftp"
|
||||
##! label to the *service* field of the control channel's
|
||||
##! :bro:type:`connection` record.
|
||||
##!
|
||||
##! GridFTP data channels are identified by a heuristic that relies on
|
||||
##! the fact that default settings for GridFTP clients typically
|
||||
##! mutally authenticate the data channel with TLS/SSL and negotiate a
|
||||
##! NULL bulk cipher (no encryption). Connections with those
|
||||
##! attributes are then polled for two minutes with decreasing frequency
|
||||
##! to check if the transfer sizes are large enough to indicate a
|
||||
##! GridFTP data channel that would be undesireable to analyze further
|
||||
##! (e.g. stop TCP reassembly). A side effect is that true connection
|
||||
##! sizes are not logged, but at the benefit of saving CPU cycles that
|
||||
##! otherwise go to analyzing the large (and likely benign) connections.
|
||||
|
||||
@load ./main
|
||||
@load base/protocols/conn
|
||||
@load base/protocols/ssl
|
||||
@load base/frameworks/notice
|
||||
|
||||
module GridFTP;
|
||||
|
||||
export {
|
||||
## Number of bytes transferred before guessing a connection is a
|
||||
## GridFTP data channel.
|
||||
const size_threshold = 1073741824 &redef;
|
||||
|
||||
## Max number of times to check whether a connection's size exceeds the
|
||||
## :bro:see:`GridFTP::size_threshold`.
|
||||
const max_poll_count = 15 &redef;
|
||||
|
||||
## Whether to skip further processing of the GridFTP data channel once
|
||||
## detected, which may help performance.
|
||||
const skip_data = T &redef;
|
||||
|
||||
## Base amount of time between checking whether a GridFTP data connection
|
||||
## has transferred more than :bro:see:`GridFTP::size_threshold` bytes.
|
||||
const poll_interval = 1sec &redef;
|
||||
|
||||
## The amount of time the base :bro:see:`GridFTP::poll_interval` is
|
||||
## increased by each poll interval. Can be used to make more frequent
|
||||
## checks at the start of a connection and gradually slow down.
|
||||
const poll_interval_increase = 1sec &redef;
|
||||
|
||||
## Raised when a GridFTP data channel is detected.
|
||||
##
|
||||
## c: The connection pertaining to the GridFTP data channel.
|
||||
global data_channel_detected: event(c: connection);
|
||||
|
||||
## The initial criteria used to determine whether to start polling
|
||||
## the connection for the :bro:see:`GridFTP::size_threshold` to have
|
||||
## been exceeded. This is called in a :bro:see:`ssl_established` event
|
||||
## handler and by default looks for both a client and server certificate
|
||||
## and for a NULL bulk cipher. One way in which this function could be
|
||||
## redefined is to make it also consider client/server certificate issuer
|
||||
## subjects.
|
||||
##
|
||||
## c: The connection which may possibly be a GridFTP data channel.
|
||||
##
|
||||
## Returns: true if the connection should be further polled for an
|
||||
## exceeded :bro:see:`GridFTP::size_threshold`, else false.
|
||||
const data_channel_initial_criteria: function(c: connection): bool &redef;
|
||||
}
|
||||
|
||||
redef record FTP::Info += {
|
||||
last_auth_requested: string &optional;
|
||||
};
|
||||
|
||||
event ftp_request(c: connection, command: string, arg: string) &priority=4
|
||||
{
|
||||
if ( command == "AUTH" && c?$ftp )
|
||||
c$ftp$last_auth_requested = arg;
|
||||
}
|
||||
|
||||
function size_callback(c: connection, cnt: count): interval
|
||||
{
|
||||
if ( c$orig$size > size_threshold || c$resp$size > size_threshold )
|
||||
{
|
||||
add c$service["gridftp-data"];
|
||||
event GridFTP::data_channel_detected(c);
|
||||
|
||||
if ( skip_data )
|
||||
skip_further_processing(c$id);
|
||||
|
||||
return -1sec;
|
||||
}
|
||||
|
||||
if ( cnt >= max_poll_count )
|
||||
return -1sec;
|
||||
|
||||
return poll_interval + poll_interval_increase * cnt;
|
||||
}

event ssl_established(c: connection) &priority=5
    {
    # If an FTP client requests AUTH GSSAPI and later an SSL handshake
    # finishes, it's likely a GridFTP control channel, so add service label.
    if ( c?$ftp && c$ftp?$last_auth_requested &&
         /GSSAPI/ in c$ftp$last_auth_requested )
        add c$service["gridftp"];
    }

function data_channel_initial_criteria(c: connection): bool
    {
    return ( c?$ssl && c$ssl?$client_subject && c$ssl?$subject &&
             c$ssl?$cipher && /WITH_NULL/ in c$ssl$cipher );
    }

event ssl_established(c: connection) &priority=-3
    {
    # By default GridFTP data channels do mutual authentication and
    # negotiate a cipher suite with a NULL bulk cipher.
    if ( data_channel_initial_criteria(c) )
        ConnPolling::watch(c, size_callback, 0, 0secs);
    }
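
Because data_channel_initial_criteria is declared &redef, a site can tighten
the check as the documentation above suggests; a sketch that additionally
matches a hypothetical issuer string:

# Hypothetical: also require a known grid CA as the server certificate issuer.
redef GridFTP::data_channel_initial_criteria = function(c: connection): bool
    {
    return ( c?$ssl && c$ssl?$client_issuer_subject && c$ssl?$issuer_subject &&
             /O=Example Grid CA/ in c$ssl$issuer_subject &&
             c$ssl?$cipher && /WITH_NULL/ in c$ssl$cipher );
    };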

@ -7,6 +7,7 @@
@load ./utils-commands
@load base/utils/paths
@load base/utils/numbers
@load base/utils/addrs

module FTP;
@ -29,7 +30,9 @@ export {
    type Info: record {
        ## Time when the command was sent.
        ts: time &log;
        ## Unique ID for the connection.
        uid: string &log;
        ## The connection's 4-tuple of endpoint addresses/ports.
        id: conn_id &log;
        ## User name for the current FTP session.
        user: string &log &default="<unknown>";
@ -169,12 +172,7 @@ function ftp_message(s: Info)

    local arg = s$cmdarg$arg;
    if ( s$cmdarg$cmd in file_cmds )
        {
        if ( is_v4_addr(s$id$resp_h) )
            arg = fmt("ftp://%s%s", s$id$resp_h, build_path_compressed(s$cwd, arg));
        else
            arg = fmt("ftp://[%s]%s", s$id$resp_h, build_path_compressed(s$cwd, arg));
        }
        arg = fmt("ftp://%s%s", addr_to_uri(s$id$resp_h), build_path_compressed(s$cwd, arg));

    s$ts=s$cmdarg$ts;
    s$command=s$cmdarg$cmd;
@ -6,7 +6,8 @@
@load ./utils

# Add the magic number signatures to the core signature set.
redef signature_files += "base/protocols/http/file-ident.sig";
@load-sigs ./file-ident.sig

# Ignore the signatures used to match files
redef Signatures::ignored_ids += /^matchfile-/;
@ -23,7 +23,9 @@ export {
    type Info: record {
        ## Timestamp for when the request happened.
        ts: time &log;
        ## Unique ID for the connection.
        uid: string &log;
        ## The connection's 4-tuple of endpoint addresses/ports.
        id: conn_id &log;
        ## Represents the pipelined depth into the connection of this
        ## request/response transaction.
@ -1,6 +1,7 @@
##! Utilities specific for HTTP processing.

@load ./main
@load base/utils/addrs

module HTTP;

@ -51,7 +52,7 @@ function extract_keys(data: string, kv_splitter: pattern): string_vec
function build_url(rec: Info): string
    {
    local uri = rec?$uri ? rec$uri : "/<missed_request>";
    local host = rec?$host ? rec$host : fmt("%s", rec$id$resp_h);
    local host = rec?$host ? rec$host : addr_to_uri(rec$id$resp_h);
    if ( rec$id$resp_p != 80/tcp )
        host = fmt("%s:%s", host, rec$id$resp_p);
    return fmt("%s%s", host, uri);
@ -13,7 +9,9 @@ export {
    type Info: record {
        ## Timestamp when the command was seen.
        ts: time &log;
        ## Unique ID for the connection.
        uid: string &log;
        ## The connection's 4-tuple of endpoint addresses/ports.
        id: conn_id &log;
        ## Nick name given for the connection.
        nick: string &log &optional;
2  scripts/base/protocols/modbus/__load__.bro  Normal file

@ -0,0 +1,2 @@
@load ./consts
@load ./main
67  scripts/base/protocols/modbus/consts.bro  Normal file

@ -0,0 +1,67 @@
module Modbus;

export {
    ## Standard defined Modbus function codes.
    const function_codes = {
        [0x01] = "READ_COILS",
        [0x02] = "READ_DISCRETE_INPUTS",
        [0x03] = "READ_HOLDING_REGISTERS",
        [0x04] = "READ_INPUT_REGISTERS",
        [0x05] = "WRITE_SINGLE_COIL",
        [0x06] = "WRITE_SINGLE_REGISTER",
        [0x07] = "READ_EXCEPTION_STATUS",
        [0x08] = "DIAGNOSTICS",
        [0x0B] = "GET_COMM_EVENT_COUNTER",
        [0x0C] = "GET_COMM_EVENT_LOG",
        [0x0F] = "WRITE_MULTIPLE_COILS",
        [0x10] = "WRITE_MULTIPLE_REGISTERS",
        [0x11] = "REPORT_SLAVE_ID",
        [0x14] = "READ_FILE_RECORD",
        [0x15] = "WRITE_FILE_RECORD",
        [0x16] = "MASK_WRITE_REGISTER",
        [0x17] = "READ_WRITE_MULTIPLE_REGISTERS",
        [0x18] = "READ_FIFO_QUEUE",
        [0x2B] = "ENCAP_INTERFACE_TRANSPORT",

        # Machine/vendor/network specific functions
        [0x09] = "PROGRAM_484",
        [0x0A] = "POLL_484",
        [0x0D] = "PROGRAM_584_984",
        [0x0E] = "POLL_584_984",
        [0x12] = "PROGRAM_884_U84",
        [0x13] = "RESET_COMM_LINK_884_U84",
        [0x28] = "PROGRAM_CONCEPT",
        [0x7D] = "FIRMWARE_REPLACEMENT",
        [0x7E] = "PROGRAM_584_984_2",
        [0x7F] = "REPORT_LOCAL_ADDRESS",

        # Exceptions
        [0x81] = "READ_COILS_EXCEPTION",
        [0x82] = "READ_DISCRETE_INPUTS_EXCEPTION",
        [0x83] = "READ_HOLDING_REGISTERS_EXCEPTION",
        [0x84] = "READ_INPUT_REGISTERS_EXCEPTION",
        [0x85] = "WRITE_SINGLE_COIL_EXCEPTION",
        [0x86] = "WRITE_SINGLE_REGISTER_EXCEPTION",
        [0x87] = "READ_EXCEPTION_STATUS_EXCEPTION",
        [0x8F] = "WRITE_MULTIPLE_COILS_EXCEPTION",
        [0x90] = "WRITE_MULTIPLE_REGISTERS_EXCEPTION",
        [0x94] = "READ_FILE_RECORD_EXCEPTION",
        [0x95] = "WRITE_FILE_RECORD_EXCEPTION",
        [0x96] = "MASK_WRITE_REGISTER_EXCEPTION",
        [0x97] = "READ_WRITE_MULTIPLE_REGISTERS_EXCEPTION",
        [0x98] = "READ_FIFO_QUEUE_EXCEPTION",
    } &default=function(i: count): string { return fmt("unknown-%d", i); } &redef;

    const exception_codes = {
        [0x01] = "ILLEGAL_FUNCTION",
        [0x02] = "ILLEGAL_DATA_ADDRESS",
        [0x03] = "ILLEGAL_DATA_VALUE",
        [0x04] = "SLAVE_DEVICE_FAILURE",
        [0x05] = "ACKNOWLEDGE",
        [0x06] = "SLAVE_DEVICE_BUSY",
        [0x08] = "MEMORY_PARITY_ERROR",
        [0x0A] = "GATEWAY_PATH_UNAVAILABLE",
        [0x0B] = "GATEWAY_TARGET_DEVICE_FAILED_TO_RESPOND",
    } &default=function(i: count): string { return fmt("unknown-%d", i); } &redef;
}
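
Both tables fall back to their &default function for codes they do not know;
a quick sketch of the lookup behavior:

event bro_init()
    {
    print Modbus::function_codes[0x01]; # READ_COILS
    print Modbus::function_codes[0x42]; # unknown-66, via the &default function
    }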
71  scripts/base/protocols/modbus/main.bro  Normal file

@ -0,0 +1,71 @@
##! Base Modbus analysis script.

module Modbus;

@load ./consts

export {
    redef enum Log::ID += { LOG };

    type Info: record {
        ## Time of the request.
        ts: time &log;
        ## Unique identifier for the connection.
        uid: string &log;
        ## Identifier for the connection.
        id: conn_id &log;
        ## The name of the function message that was sent.
        func: string &log &optional;
        ## The exception if the response was a failure.
        exception: string &log &optional;
    };

    ## Event that can be handled to access the Modbus record as it is sent on
    ## to the logging framework.
    global log_modbus: event(rec: Info);
}

redef record connection += {
    modbus: Info &optional;
};

# Configure DPD and the packet filter.
redef capture_filters += { ["modbus"] = "tcp port 502" };
redef dpd_config += { [ANALYZER_MODBUS] = [$ports = set(502/tcp)] };
redef likely_server_ports += { 502/tcp };

event bro_init() &priority=5
    {
    Log::create_stream(Modbus::LOG, [$columns=Info, $ev=log_modbus]);
    }

event modbus_message(c: connection, headers: ModbusHeaders, is_orig: bool) &priority=5
    {
    if ( ! c?$modbus )
        c$modbus = [$ts=network_time(), $uid=c$uid, $id=c$id];

    c$modbus$ts = network_time();
    c$modbus$func = function_codes[headers$function_code];
    }

event modbus_message(c: connection, headers: ModbusHeaders, is_orig: bool) &priority=-5
    {
    # Only log upon replies. Also, don't log now if this is an exception
    # (that's logged in the exception event handlers below).
    if ( ! is_orig && ( headers$function_code <= 0x81 || headers$function_code >= 0x98 ) )
        Log::write(LOG, c$modbus);
    }

event modbus_exception(c: connection, headers: ModbusHeaders, code: count) &priority=5
    {
    c$modbus$exception = exception_codes[code];
    }

event modbus_exception(c: connection, headers: ModbusHeaders, code: count) &priority=-5
    {
    Log::write(LOG, c$modbus);
    delete c$modbus$exception;
    }
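
A minimal sketch of consuming the log_modbus event declared above, flagging
write operations (the print is illustrative only):

event Modbus::log_modbus(rec: Modbus::Info)
    {
    if ( rec?$func && /^WRITE/ in rec$func )
        print fmt("Modbus write %s: %s -> %s", rec$func, rec$id$orig_h, rec$id$resp_h);
    }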
@ -9,33 +9,51 @@ export {
    redef enum Log::ID += { LOG };

    type Info: record {
        ## Time when the message was first seen.
        ts: time &log;
        ## Unique ID for the connection.
        uid: string &log;
        ## The connection's 4-tuple of endpoint addresses/ports.
        id: conn_id &log;
        ## This is a number that indicates the number of messages deep into
        ## this connection where this particular message was transferred.
        ## A count to represent the depth of this message transaction in a single
        ## connection where multiple messages were transferred.
        trans_depth: count &log;
        ## Contents of the Helo header.
        helo: string &log &optional;
        ## Contents of the From header.
        mailfrom: string &log &optional;
        ## Contents of the Rcpt header.
        rcptto: set[string] &log &optional;
        ## Contents of the Date header.
        date: string &log &optional;
        ## Contents of the From header.
        from: string &log &optional;
        ## Contents of the To header.
        to: set[string] &log &optional;
        ## Contents of the ReplyTo header.
        reply_to: string &log &optional;
        ## Contents of the MsgID header.
        msg_id: string &log &optional;
        ## Contents of the In-Reply-To header.
        in_reply_to: string &log &optional;
        ## Contents of the Subject header.
        subject: string &log &optional;
        ## Contents of the X-Originating-IP header.
        x_originating_ip: addr &log &optional;
        ## Contents of the first Received header.
        first_received: string &log &optional;
        ## Contents of the second Received header.
        second_received: string &log &optional;
        ## The last message the server sent to the client.
        ## The last message that the server sent to the client.
        last_reply: string &log &optional;
        ## The message transmission path, as extracted from the headers.
        path: vector of addr &log &optional;
        ## Value of the User-Agent header from the client.
        user_agent: string &log &optional;

        ## Indicate if the "Received: from" headers should still be processed.
        ## Indicates if the "Received: from" headers should still be processed.
        process_received_from: bool &default=T;
        ## Indicates if client activity has been seen, but not yet logged
        ## Indicates if client activity has been seen, but not yet logged.
        has_client_activity: bool &default=F;
    };
2  scripts/base/protocols/socks/__load__.bro  Normal file

@ -0,0 +1,2 @@
@load ./consts
@load ./main
40  scripts/base/protocols/socks/consts.bro  Normal file

@ -0,0 +1,40 @@
module SOCKS;

export {
    type RequestType: enum {
        CONNECTION = 1,
        PORT = 2,
        UDP_ASSOCIATE = 3,
    };

    const v5_authentication_methods: table[count] of string = {
        [0] = "No Authentication Required",
        [1] = "GSSAPI",
        [2] = "Username/Password",
        [3] = "Challenge-Handshake Authentication Protocol",
        [5] = "Challenge-Response Authentication Method",
        [6] = "Secure Sockets Layer",
        [7] = "NDS Authentication",
        [8] = "Multi-Authentication Framework",
        [255] = "No Acceptable Methods",
    } &default=function(i: count): string { return fmt("unknown-%d", i); };

    const v4_status: table[count] of string = {
        [0x5a] = "succeeded",
        [0x5b] = "general SOCKS server failure",
        [0x5c] = "request failed because client is not running identd",
        [0x5d] = "request failed because client's identd could not confirm the user ID string in the request",
    } &default=function(i: count): string { return fmt("unknown-%d", i); };

    const v5_status: table[count] of string = {
        [0] = "succeeded",
        [1] = "general SOCKS server failure",
        [2] = "connection not allowed by ruleset",
        [3] = "Network unreachable",
        [4] = "Host unreachable",
        [5] = "Connection refused",
        [6] = "TTL expired",
        [7] = "Command not supported",
        [8] = "Address type not supported",
    } &default=function(i: count): string { return fmt("unknown-%d", i); };
}
92  scripts/base/protocols/socks/main.bro  Normal file

@ -0,0 +1,92 @@
@load base/frameworks/tunnels
@load ./consts

module SOCKS;

export {
    redef enum Log::ID += { LOG };

    type Info: record {
        ## Time when the proxy connection was first detected.
        ts: time &log;
        ## Unique ID for the tunnel - may correspond to connection uid or be non-existent.
        uid: string &log;
        ## The connection's 4-tuple of endpoint addresses/ports.
        id: conn_id &log;
        ## Protocol version of SOCKS.
        version: count &log;
        ## Username for the proxy if extracted from the network.
        user: string &log &optional;
        ## Server status for the attempt at using the proxy.
        status: string &log &optional;
        ## Client requested SOCKS address. Could be an address, a name or both.
        request: SOCKS::Address &log &optional;
        ## Client requested port.
        request_p: port &log &optional;
        ## Server bound address. Could be an address, a name or both.
        bound: SOCKS::Address &log &optional;
        ## Server bound port.
        bound_p: port &log &optional;
    };

    ## Event that can be handled to access the SOCKS
    ## record as it is sent on to the logging framework.
    global log_socks: event(rec: Info);
}

event bro_init() &priority=5
    {
    Log::create_stream(SOCKS::LOG, [$columns=Info, $ev=log_socks]);
    }

redef record connection += {
    socks: SOCKS::Info &optional;
};

# Configure DPD
redef capture_filters += { ["socks"] = "tcp port 1080" };
redef dpd_config += { [ANALYZER_SOCKS] = [$ports = set(1080/tcp)] };
redef likely_server_ports += { 1080/tcp };

function set_session(c: connection, version: count)
    {
    if ( ! c?$socks )
        c$socks = [$ts=network_time(), $id=c$id, $uid=c$uid, $version=version];
    }

event socks_request(c: connection, version: count, request_type: count,
                    sa: SOCKS::Address, p: port, user: string) &priority=5
    {
    set_session(c, version);

    c$socks$request = sa;
    c$socks$request_p = p;

    # Copy this conn_id and set the orig_p to zero because in the case of
    # SOCKS proxies there will potentially be many source ports, since a new
    # proxy connection is established for each proxied connection. We treat
    # this as a singular "tunnel".
    local cid = copy(c$id);
    cid$orig_p = 0/tcp;
    Tunnel::register([$cid=cid, $tunnel_type=Tunnel::SOCKS, $payload_proxy=T]);
    }

event socks_reply(c: connection, version: count, reply: count, sa: SOCKS::Address, p: port) &priority=5
    {
    set_session(c, version);

    if ( version == 5 )
        c$socks$status = v5_status[reply];
    else if ( version == 4 )
        c$socks$status = v4_status[reply];

    c$socks$bound = sa;
    c$socks$bound_p = p;
    }

event socks_reply(c: connection, version: count, reply: count, sa: SOCKS::Address, p: port) &priority=-5
    {
    # This will handle the case where the analyzer failed in some way and was
    # removed. We probably don't want to log these connections.
    if ( "SOCKS" in c$service )
        Log::write(SOCKS::LOG, c$socks);
    }
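
The log event again provides a convenient hook; a minimal sketch that flags
proxy attempts the server did not accept (the print is illustrative only):

event SOCKS::log_socks(rec: SOCKS::Info)
    {
    if ( rec?$status && rec$status != "succeeded" )
        print fmt("Failed SOCKS attempt on %s: %s", rec$uid, rec$status);
    }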
@ -27,21 +27,23 @@ export {
    type Info: record {
        ## Time when the SSH connection began.
        ts: time &log;
        ## Unique ID for the connection.
        uid: string &log;
        ## The connection's 4-tuple of endpoint addresses/ports.
        id: conn_id &log;
        ## Indicates if the login was heuristically guessed to be "success"
        ## or "failure".
        status: string &log &optional;
        ## Direction of the connection. If the client was a local host
        ## logging into an external host, this would be OUTBOUD. INBOUND
        ## logging into an external host, this would be OUTBOUND. INBOUND
        ## would be set for the opposite situation.
        # TODO: handle local-local and remote-remote better.
        direction: Direction &log &optional;
        ## Software string given by the client.
        ## Software string from the client.
        client: string &log &optional;
        ## Software string given by the server.
        ## Software string from the server.
        server: string &log &optional;
        ## Amount of data returned from the server. This is currently
        ## the only measure of the success heuristic and it is logged to
        ## assist analysts looking at the logs to make their own determination
        ## about the success on a case-by-case basis.
@ -81,6 +81,8 @@ export {
        [35] = "SessionTicket TLS",
        [40] = "extended_random",
        [13172] = "next_protocol_negotiation",
        [13175] = "origin_bound_certificates",
        [13180] = "encrypted_client_certificates",
        [65281] = "renegotiation_info"
    } &default=function(i: count): string { return fmt("unknown-%d", i); };
@ -10,54 +10,69 @@ export {
    redef enum Log::ID += { LOG };

    type Info: record {
        ## Time when the SSL connection began.
        ## Time when the SSL connection was first detected.
        ts: time &log;
        uid: string &log;
        ## Unique ID for the connection.
        uid: string &log;
        ## The connection's 4-tuple of endpoint addresses/ports.
        id: conn_id &log;
        ## SSL/TLS version the server offered.
        ## SSL/TLS version that the server offered.
        version: string &log &optional;
        ## SSL/TLS cipher suite the server chose.
        ## SSL/TLS cipher suite that the server chose.
        cipher: string &log &optional;
        ## Value of the Server Name Indicator SSL/TLS extension. It
        ## indicates the server name that the client was requesting.
        server_name: string &log &optional;
        ## Session ID offered by the client for session resumption.
        session_id: string &log &optional;
        ## Subject of the X.509 certificate offered by the server.
        subject: string &log &optional;
        ## Subject of the signer of the X.509 certificate offered by the server.
        issuer_subject: string &log &optional;
        ## NotValidBefore field value from the server certificate.
        not_valid_before: time &log &optional;
        ## NotValidAfter field value from the serve certificate.
        ## NotValidAfter field value from the server certificate.
        not_valid_after: time &log &optional;
        ## Last alert that was seen during the connection.
        last_alert: string &log &optional;

        ## Subject of the X.509 certificate offered by the client.
        client_subject: string &log &optional;
        ## Subject of the signer of the X.509 certificate offered by the client.
        client_issuer_subject: string &log &optional;

        ## Full binary server certificate stored in DER format.
        cert: string &optional;
        ## Chain of certificates offered by the server to validate its
        ## complete signing chain.
        cert_chain: vector of string &optional;

        ## Full binary client certificate stored in DER format.
        client_cert: string &optional;
        ## Chain of certificates offered by the client to validate its
        ## complete signing chain.
        client_cert_chain: vector of string &optional;

        ## The analyzer ID used for the analyzer instance attached
        ## to each connection. It is not used for logging since it's a
        ## meaningless arbitrary number.
        analyzer_id: count &optional;
    };

    ## The default root CA bundle. By loading the
    ## mozilla-ca-list.bro script it will be set to Mozilla's root CA list.
    const root_certs: table[string] of string = {} &redef;

    ## If true, detach the SSL analyzer from the connection to prevent
    ## continuing to process encrypted traffic. Helps with performance
    ## (especially with large file transfers).
    const disable_analyzer_after_detection = T &redef;

    ## The openssl command line utility. If it's in the path, the default
    ## value will work; otherwise a full path string can be supplied for the
    ## utility.
    const openssl_util = "openssl" &redef;

    ## Event that can be handled to access the SSL
    ## record as it is sent on to the logging framework.
    global log_ssl: event(rec: Info);
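
A minimal sketch of hooking log_ssl, mirroring the NULL-cipher check the
GridFTP script above relies on (the print is illustrative only):

event SSL::log_ssl(rec: SSL::Info)
    {
    if ( rec?$cipher && /WITH_NULL/ in rec$cipher )
        print fmt("NULL bulk cipher negotiated on %s", rec$uid);
    }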
@ -82,7 +97,8 @@ redef Protocols::common_ports += { ["SSL"] = ports };
function set_session(c: connection)
    {
    if ( ! c?$ssl )
        c$ssl = [$ts=network_time(), $uid=c$uid, $id=c$id, $cert_chain=vector()];
        c$ssl = [$ts=network_time(), $uid=c$uid, $id=c$id, $cert_chain=vector(),
                 $client_cert_chain=vector()];
    }

function finish(c: connection)
@ -116,22 +132,40 @@ event x509_certificate(c: connection, is_orig: bool, cert: X509, chain_idx: count

    # We aren't doing anything with client certificates yet.
    if ( is_orig )
        return;

    if ( chain_idx == 0 )
        {
        # Save the primary cert.
        c$ssl$cert = der_cert;
        if ( chain_idx == 0 )
            {
            # Save the primary cert.
            c$ssl$client_cert = der_cert;

        # Also save other certificate information about the primary cert.
        c$ssl$subject = cert$subject;
        c$ssl$not_valid_before = cert$not_valid_before;
        c$ssl$not_valid_after = cert$not_valid_after;
            # Also save other certificate information about the primary cert.
            c$ssl$client_subject = cert$subject;
            c$ssl$client_issuer_subject = cert$issuer;
            }
        else
            {
            # Otherwise, add it to the cert validation chain.
            c$ssl$client_cert_chain[|c$ssl$client_cert_chain|] = der_cert;
            }
        }
    else
        {
        # Otherwise, add it to the cert validation chain.
        c$ssl$cert_chain[|c$ssl$cert_chain|] = der_cert;
        if ( chain_idx == 0 )
            {
            # Save the primary cert.
            c$ssl$cert = der_cert;

            # Also save other certificate information about the primary cert.
            c$ssl$subject = cert$subject;
            c$ssl$issuer_subject = cert$issuer;
            c$ssl$not_valid_before = cert$not_valid_before;
            c$ssl$not_valid_after = cert$not_valid_after;
            }
        else
            {
            # Otherwise, add it to the cert validation chain.
            c$ssl$cert_chain[|c$ssl$cert_chain|] = der_cert;
            }
        }
    }
File diff suppressed because one or more lines are too long

@ -10,9 +10,11 @@ export {
    redef enum Log::ID += { LOG };

    type Info: record {
        ## Timestamp of when the syslog message was seen.
        ## Timestamp when the syslog message was seen.
        ts: time &log;
        ## Unique ID for the connection.
        uid: string &log;
        ## The connection's 4-tuple of endpoint addresses/ports.
        id: conn_id &log;
        ## Protocol over which the message was seen.
        proto: transport_proto &log;
@ -98,3 +98,18 @@ function find_ip_addresses(input: string): string_array
        }
    return output;
    }

## Returns the string representation of an IP address suitable for inclusion
## in a URI. For IPv4, this does no special formatting, but for IPv6, the
## address is included in square brackets.
##
## a: the address to make suitable for URI inclusion.
##
## Returns: the string representation of *a* suitable for URI inclusion.
function addr_to_uri(a: addr): string
    {
    if ( is_v4_addr(a) )
        return fmt("%s", a);
    else
        return fmt("[%s]", a);
    }
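
The behavior is easiest to see with literals; a quick sketch (the addresses
are examples):

event bro_init()
    {
    print addr_to_uri(192.168.1.100); # 192.168.1.100
    print addr_to_uri([2001:db8::1]); # [2001:db8::1]
    }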
@ -1,10 +1,11 @@
@load ./addrs

## This function can be used to generate a consistent filename for when
## contents of a file, stream, or connection are being extracted to disk.
function generate_extraction_filename(prefix: string, c: connection, suffix: string): string
    {
    local conn_info = fmt("%s:%d-%s:%d",
                          c$id$orig_h, c$id$orig_p, c$id$resp_h, c$id$resp_p);
    local conn_info = fmt("%s:%d-%s:%d", addr_to_uri(c$id$orig_h), c$id$orig_p,
                          addr_to_uri(c$id$resp_h), c$id$resp_p);

    if ( prefix != "" )
        conn_info = fmt("%s_%s", prefix, conn_info);
@ -1,5 +1,7 @@
##! Functions for creating and working with patterns.

module GLOBAL;

## Given a pattern as a string with two tildes (~~) contained in it, this
## returns a pattern with the string set's elements OR'd together where the
## double-tilde was given (this function only works at or before init time).
@ -8,27 +8,31 @@ export {
    ## Address space that is considered private and unrouted.
    ## By default it has RFC defined non-routable IPv4 address space.
    const private_address_space: set[subnet] = {
        10.0.0.0/8,
        192.168.0.0/16,
        127.0.0.0/8,
        172.16.0.0/12
        10.0.0.0/8,
        192.168.0.0/16,
        172.16.0.0/12,
        100.64.0.0/10, # RFC6598 Carrier Grade NAT
        127.0.0.0/8,
        [fe80::]/10,
        [::1]/128,
    } &redef;

    ## Networks that are considered "local".
    const local_nets: set[subnet] &redef;

    ## This is used for retrieving the subnet when you multiple
    ## :bro:id:`Site::local_nets`. A membership query can be done with an
    ## :bro:type:`addr` and the table will yield the subnet it was found
    ## This is used for retrieving the subnet when using multiple entries in
    ## :bro:id:`Site::local_nets`. It's populated automatically from there.
    ## A membership query can be done with an
    ## :bro:type:`addr` and the table will yield the subnet it was found
    ## within.
    global local_nets_table: table[subnet] of subnet = {};

    ## Networks that are considered "neighbors".
    const neighbor_nets: set[subnet] &redef;

    ## If local network administrators are known and they have responsibility
    ## for defined address space, then a mapping can be defined here between
    ## networks for which they have responsibility and a set of email
    ## addresses.
    const local_admins: table[subnet] of set[string] = {} &redef;
@ -40,27 +44,33 @@ export {

    ## Function that returns true if an address corresponds to one of
    ## the local networks, false if not.
    ## The function inspects :bro:id:`Site::local_nets`.
    global is_local_addr: function(a: addr): bool;

    ## Function that returns true if an address corresponds to one of
    ## the neighbor networks, false if not.
    ## The function inspects :bro:id:`Site::neighbor_nets`.
    global is_neighbor_addr: function(a: addr): bool;

    ## Function that returns true if an address corresponds to one of
    ## the private/unrouted networks, false if not.
    ## The function inspects :bro:id:`Site::private_address_space`.
    global is_private_addr: function(a: addr): bool;

    ## Function that returns true if a host name is within a local
    ## DNS zone.
    ## The function inspects :bro:id:`Site::local_zones`.
    global is_local_name: function(name: string): bool;

    ## Function that returns true if a host name is within a neighbor
    ## DNS zone.
    ## The function inspects :bro:id:`Site::neighbor_zones`.
    global is_neighbor_name: function(name: string): bool;

    ## Function that returns a comma-separated list of email addresses
    ## that are considered administrators for the IP address provided as
    ## an argument.
    ## The function inspects :bro:id:`Site::local_admins`.
    global get_emails: function(a: addr): string;
}
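
These predicates only become useful once Site::local_nets is populated; a
minimal sketch of a site-local redef (the subnets are examples):

redef Site::local_nets += {
    192.168.0.0/16,  # example RFC1918 space in use locally
    [2001:db8::]/32, # example IPv6 documentation prefix
};

After this, Site::is_local_addr(192.168.1.1) returns true.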
@ -73,22 +83,22 @@ function is_local_addr(a: addr): bool
    {
    return a in local_nets;
    }

function is_neighbor_addr(a: addr): bool
    {
    return a in neighbor_nets;
    }

function is_private_addr(a: addr): bool
    {
    return a in private_address_space;
    }

function is_local_name(name: string): bool
    {
    return local_dns_suffix_regex in name;
    }

function is_neighbor_name(name: string): bool
    {
    return local_dns_neighbor_suffix_regex in name;

@ -96,7 +106,7 @@ function is_neighbor_name(name: string): bool

# This is a hack for doing a for loop.
const one_to_32: vector of count = {1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32};

# TODO: make this work with IPv6
function find_all_emails(ip: addr): set[string]
    {
25  scripts/base/utils/urls.bro  Normal file

@ -0,0 +1,25 @@
## Functions for URL handling.

## A regular expression for matching and extracting URLs.
const url_regex = /^([a-zA-Z\-]{3,5})(:\/\/[^\/?#"'\r\n><]*)([^?#"'\r\n><]*)([^[:blank:]\r\n"'><]*|\??[^"'\r\n><]*)/ &redef;

## Extracts URLs discovered in arbitrary text.
function find_all_urls(s: string): string_set
    {
    return find_all(s, url_regex);
    }

## Extracts URLs discovered in arbitrary text, without
## the URL scheme included.
function find_all_urls_without_scheme(s: string): string_set
    {
    local urls = find_all_urls(s);
    local return_urls: set[string] = set();
    for ( url in urls )
        {
        local no_scheme = sub(url, /^([a-zA-Z\-]{3,5})(:\/\/)/, "");
        add return_urls[no_scheme];
        }

    return return_urls;
    }
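
A minimal sketch of calling these helpers on hypothetical input:

event bro_init()
    {
    local msg = "Fetch http://example.com/a and https://example.org/b today";
    print find_all_urls(msg);                # both URLs, schemes included
    print find_all_urls_without_scheme(msg); # the same URLs, schemes stripped
    }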