Merge remote-tracking branch 'origin/master' into topic/dnthayer/alarms-mail

This commit is contained in:
Daniel Thayer 2012-10-30 11:32:58 -05:00
commit 0f97f0b6e4
618 changed files with 11183 additions and 2057 deletions

View file

@ -10,14 +10,14 @@ export {
## The communication logging stream identifier.
redef enum Log::ID += { LOG };
## Which interface to listen on. The addresses ``0.0.0.0`` and ``[::]``
## are wildcards.
const listen_interface = 0.0.0.0 &redef;
## Which port to listen on.
const listen_port = 47757/tcp &redef;
## This defines if a listening socket should use SSL.
const listen_ssl = F &redef;
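
These listen options can be overridden from a site's local policy with ``redef``; a minimal sketch (the interface address and port values here are hypothetical):

    redef Communication::listen_interface = 192.168.1.10;
    redef Communication::listen_port = 47756/tcp;
    redef Communication::listen_ssl = T;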
@ -34,7 +34,7 @@ export {
## :bro:id:`Communication::listen_port` if it's already in use.
const listen_retry = 30 secs &redef;
## Default compression level. Compression level is 0-9, with 0 = no
## compression.
global compression_level = 0 &redef;
@ -42,7 +42,7 @@ export {
type Info: record {
## The network time at which a communication event occurred.
ts: time &log;
## The peer name (if any) with which a communication event is concerned.
peer: string &log &optional;
## Where the communication event message originated from, that is,
## either from the scripting layer or inside the Bro process.
@ -70,7 +70,7 @@ export {
## If the *host* field is a non-global IPv6 address, this field
## can specify a particular :rfc:`4007` ``zone_id``.
zone_id: string &optional;
## Port of the remote Bro communication endpoint if we are initiating
## the connection based on the :bro:id:`connect` field.
p: port &optional;
@ -120,7 +120,7 @@ export {
## The remote peer.
peer: event_peer &optional;
## Indicates the status of the node.
connected: bool &default = F;
};
@ -163,7 +163,7 @@ event bro_init() &priority=5
function do_script_log_common(level: count, src: count, msg: string)
{
Log::write(Communication::LOG, [$ts = network_time(),
$level = (level == REMOTE_LOG_INFO ? "info" : "error"),
$src_name = src_names[src],
$peer = get_event_peer()$descr,
@ -199,9 +199,9 @@ function connect_peer(peer: string)
local class = node?$class ? node$class : "";
local zone_id = node?$zone_id ? node$zone_id : "";
local id = connect(node$host, zone_id, p, class, node$retry, node$ssl);
if ( id == PEER_ID_NONE )
Log::write(Communication::LOG, [$ts = network_time(),
$peer = get_event_peer()$descr,
$message = "can't trigger connect"]);
pending_peers[id] = node;
@ -340,7 +340,7 @@ event bro_init() &priority = -10 # let others modify nodes
{
if ( |nodes| > 0 )
enable_communication();
for ( tag in nodes )
{
if ( ! nodes[tag]$connect )

View file

@ -8,8 +8,16 @@ export {
## The default input reader used. Defaults to `READER_ASCII`.
const default_reader = READER_ASCII &redef;
## The default reader mode used. Defaults to `MANUAL`.
const default_mode = MANUAL &redef;
## Flag that controls if the input framework accepts records
## that contain types that are not supported (at the moment,
## file and function). If true, the input framework will
## warn in these cases, but continue. If false, it will
## abort. Defaults to false (abort).
const accept_unsupported_types = F &redef;
## TableFilter description type used for the `table` method.
type TableDescription: record {
## Common definitions for tables and events
@ -82,11 +90,11 @@ export {
## Record describing the fields to be retrieved from the source input.
fields: any;
## If want_record is false, the event receives each value in fields as a separate argument.
## If it is set to true (default), the event receives all fields in a single record value.
want_record: bool &default=T;
## The event that is raised each time a new line is received from the reader.
## The event will receive an Input::Event enum as the first element, and the fields as the following arguments.
ev: any;
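
As a usage sketch of an event stream built on this description type (the file name, record type, and event name here are hypothetical): per the documentation above, the handler receives the ``Input::Event`` enum first, followed by the fields, which arrive as a single record when *want_record* is true:

    type FooEntry: record {
        ip: addr;
        msg: string;
    };

    event foo_entry(tpe: Input::Event, e: FooEntry)
        {
        print fmt("new entry: %s %s", e$ip, e$msg);
        }

    event bro_init()
        {
        Input::add_event([$source="foo.dat", $name="foo",
                          $fields=FooEntry, $ev=foo_entry]);
        }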
@ -106,7 +114,8 @@ export {
## description: `EventDescription` record describing the source.
global add_event: function(description: Input::EventDescription) : bool;
## Remove an input stream. Returns true on success and false if the named stream was
## not found.
##
## id: string value identifying the stream to be removed
global remove: function(id: string) : bool;
@ -117,8 +126,9 @@ export {
## id: string value identifying the stream
global force_update: function(id: string) : bool;
## Event that is called when the end of a data source has been reached, including
## after an update.
global end_of_data: event(name: string, source:string);
}
@load base/input.bif
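
The new ``end_of_data`` event can be handled to react once a source has been read completely; a small sketch (the stream name matches the hypothetical example above):

    event Input::end_of_data(name: string, source: string)
        {
        if ( name == "foo" )
            Input::remove("foo");
        }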

View file

@ -2,4 +2,5 @@
@load ./postprocessors
@load ./writers/ascii
@load ./writers/dataseries
@load ./writers/elasticsearch
@load ./writers/none

View file

@ -99,6 +99,12 @@ export {
## file name. Generally, filenames are expected to be given
## without any extensions; writers will add appropriate
## extensions automatically.
##
## If this path is found to conflict with another filter's
## for the same writer type, it is automatically corrected
## by appending "-N", where N is the smallest integer greater than
## or equal to 2 that allows the corrected path name to not
## conflict with another filter's.
path: string &optional;
## A function returning the output path for recording entries
@ -118,7 +124,10 @@ export {
## rec: An instance of the stream's ``columns`` type with its
## fields set to the values to be logged.
##
## Returns: The path to be used for the filter, which will be subject
## to the same automatic correction rules as the *path*
## field of :bro:type:`Log::Filter` in the case of conflicts
## with other filters trying to use the same writer/path pair.
path_func: function(id: ID, path: string, rec: any): string &optional;
## Subset of column names to record. If not given, all
@ -321,6 +330,11 @@ export {
## Log::default_rotation_postprocessor_cmd
## Log::default_rotation_postprocessors
global run_rotation_postprocessor_cmd: function(info: RotationInfo, npath: string) : bool;
## The streams which are currently active and not disabled.
## This table is not meant to be modified by users! Only use it for
## examining which streams are active.
global active_streams: table[ID] of Stream = table();
}
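
As a sketch of the *path_func* mechanism documented above, a filter can split a stream across output paths; any returned path is still subject to the automatic conflict correction. This mirrors the standard local/remote split example:

    function split_log(id: Log::ID, path: string, rec: Conn::Info): string
        {
        # Route entries by whether the originator is a local address.
        return Site::is_local_addr(rec$id$orig_h) ? "conn-local" : "conn-remote";
        }

    event bro_init()
        {
        Log::add_filter(Conn::LOG, [$name="conn-split", $path_func=split_log]);
        }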
# We keep a script-level copy of all filters so that we can manipulate them.
@ -335,22 +349,23 @@ function __default_rotation_postprocessor(info: RotationInfo) : bool
{
if ( info$writer in default_rotation_postprocessors )
return default_rotation_postprocessors[info$writer](info);
else
# Return T by default so that postprocessor-less writers don't shut down.
return T;
}
function default_path_func(id: ID, path: string, rec: any) : string
{
# The suggested path value is a previous result of this function
# or a filter path explicitly set by the user, so continue using it.
if ( path != "" )
return path;
local id_str = fmt("%s", id);
local parts = split1(id_str, /::/);
if ( |parts| == 2 )
{
# Example: Notice::LOG -> "notice"
if ( parts[2] == "LOG" )
{
@ -405,11 +420,15 @@ function create_stream(id: ID, stream: Stream) : bool
if ( ! __create_stream(id, stream) )
return F;
active_streams[id] = stream;
return add_default_filter(id);
}
function disable_stream(id: ID) : bool
{
delete active_streams[id];
return __disable_stream(id);
}
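
The new ``active_streams`` table can be examined (but not modified) to see which streams are enabled; for example, a low-priority handler could dump them at startup:

    event bro_init() &priority=-20
        {
        for ( id in Log::active_streams )
            print fmt("active log stream: %s", id);
        }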

View file

@ -8,12 +8,13 @@ export {
## into files. This is primarily for debugging purposes.
const output_to_stdout = F &redef;
## If true, include lines with log meta information such as column names with
## types, the values of ASCII logging options in use, and the time when the
## file was opened and closed (the latter at the end).
const include_meta = T &redef;
## Prefix for lines with meta information.
const meta_prefix = "#" &redef;
## Separator between fields.
const separator = "\t" &redef;
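
A site wanting leaner, machine-friendly output could tune these options, e.g. (values illustrative only):

    redef LogAscii::include_meta = F;
    redef LogAscii::separator = ",";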

View file

@ -0,0 +1,48 @@
##! Log writer for sending logs to an ElasticSearch server.
##!
##! Note: This module is in testing and is not yet considered stable!
##!
##! There is one known memory issue. If your elasticsearch server is
##! running slowly and taking too long to return from bulk insert
##! requests, the message queue to the writer thread will continue
##! growing larger and larger, giving the appearance of a memory leak.
module LogElasticSearch;
export {
## Name of the ES cluster
const cluster_name = "elasticsearch" &redef;
## ES Server
const server_host = "127.0.0.1" &redef;
## ES Port
const server_port = 9200 &redef;
## Name of the ES index
const index_prefix = "bro" &redef;
## The ES type prefix comes before the name of the related log.
## e.g. prefix = "bro\_" would create types of bro_dns, bro_software, etc.
const type_prefix = "" &redef;
## The time before an ElasticSearch transfer will timeout. Note that
## the fractional part of the timeout will be ignored. In particular, time
## specifications less than a second result in a timeout value of 0, which
## means "no timeout."
const transfer_timeout = 2secs;
## The batch size is the number of messages that will be queued up before
## they are sent to be bulk indexed.
const max_batch_size = 1000 &redef;
## The maximum amount of wall-clock time that is allowed to pass without
## finishing a bulk log send. This represents the maximum delay you
## would like to have with your logs before they are sent to ElasticSearch.
const max_batch_interval = 1min &redef;
## The maximum byte size for a buffered JSON string to send to the bulk
## insert API.
const max_byte_size = 1024 * 1024 &redef;
}
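
A deployment would typically point the writer at its own cluster; a sketch with hypothetical values:

    redef LogElasticSearch::server_host = "es.example.com";
    redef LogElasticSearch::index_prefix = "bro-prod";
    redef LogElasticSearch::max_batch_size = 5000;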

View file

@ -23,7 +23,7 @@ redef Cluster::worker2manager_events += /Notice::cluster_notice/;
@if ( Cluster::local_node_type() != Cluster::MANAGER )
# The notice policy is completely handled by the manager and shouldn't be
# done by workers or proxies to save time for packet processing.
event bro_init() &priority=11
{
Notice::policy = table();
}

View file

@ -1,5 +1,5 @@
##! This framework is intended to create an output and filtering path for
##! internal messages/warnings/errors. It should typically be loaded to
##! avoid Bro spewing internal messages to standard error and instead log
##! them to a file in a standard way. Note that this framework deals with
##! the handling of internally-generated reporter messages, for the
@ -13,11 +13,11 @@ export {
redef enum Log::ID += { LOG };
## An indicator of reporter message severity.
type Level: enum {
## Informational, not needing specific attention.
INFO,
## Warning of a potential problem.
WARNING,
## A non-fatal error that should be addressed, but doesn't
## terminate program execution.
ERROR
@ -36,24 +36,55 @@ export {
## Not all reporter messages will have locations in them though.
location: string &log &optional;
};
## Tunable for sending reporter warning messages to STDERR. The option to
## turn it off is presented here in case Bro is being run by some
## external harness and shouldn't output anything to the console.
const warnings_to_stderr = T &redef;
## Tunable for sending reporter error messages to STDERR. The option to
## turn it off is presented here in case Bro is being run by some
## external harness and shouldn't output anything to the console.
const errors_to_stderr = T &redef;
}
global stderr: file;
event bro_init() &priority=5
{
Log::create_stream(Reporter::LOG, [$columns=Info]);
if ( errors_to_stderr || warnings_to_stderr )
stderr = open("/dev/stderr");
}
event reporter_info(t: time, msg: string, location: string) &priority=-5
{
Log::write(Reporter::LOG, [$ts=t, $level=INFO, $message=msg, $location=location]);
}
event reporter_warning(t: time, msg: string, location: string) &priority=-5
{
if ( warnings_to_stderr )
{
if ( t > double_to_time(0.0) )
print stderr, fmt("WARNING: %.6f %s (%s)", t, msg, location);
else
print stderr, fmt("WARNING: %s (%s)", msg, location);
}
Log::write(Reporter::LOG, [$ts=t, $level=WARNING, $message=msg, $location=location]);
}
event reporter_error(t: time, msg: string, location: string) &priority=-5
{
if ( errors_to_stderr )
{
if ( t > double_to_time(0.0) )
print stderr, fmt("ERROR: %.6f %s (%s)", t, msg, location);
else
print stderr, fmt("ERROR: %s (%s)", msg, location);
}
Log::write(Reporter::LOG, [$ts=t, $level=ERROR, $message=msg, $location=location]);
}
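
When Bro runs under an external harness, both console channels can be silenced while reporter.log is still written; for example:

    redef Reporter::warnings_to_stderr = F;
    redef Reporter::errors_to_stderr = F;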

View file

@ -826,7 +826,7 @@ const tcp_storm_interarrival_thresh = 1 sec &redef;
## peer's ACKs. Set to zero to turn off this determination.
##
## .. bro:see:: tcp_max_above_hole_without_any_acks tcp_excessive_data_without_further_acks
const tcp_max_initial_window = 4096 &redef;
## If we're not seeing our peer's ACKs, the maximum volume of data above a sequence
## hole that we'll tolerate before assuming that there's been a packet drop and we
@ -834,7 +834,7 @@ const tcp_max_initial_window = 4096;
## up.
##
## .. bro:see:: tcp_max_initial_window tcp_excessive_data_without_further_acks
const tcp_max_above_hole_without_any_acks = 4096 &redef;
## If we've seen this much data without any of it being acked, we give up
## on that connection to avoid memory exhaustion due to buffering all that
@ -843,7 +843,7 @@ const tcp_max_above_hole_without_any_acks = 4096;
## has in fact gone too far, but for now we just make this quite beefy.
##
## .. bro:see:: tcp_max_initial_window tcp_max_above_hole_without_any_acks
const tcp_excessive_data_without_further_acks = 10 * 1024 * 1024 &redef;
## For services without a handler, these sets define originator-side ports that
## still trigger reassembly.
@ -1135,10 +1135,10 @@ type ip6_ah: record {
rsv: count;
## Security Parameter Index.
spi: count;
## Sequence number, unset in the case that the *len* field is zero.
seq: count &optional;
## Authentication data, unset in the case that the *len* field is zero.
data: string &optional;
};
## Values extracted from an IPv6 ESP extension header.
@ -2784,6 +2784,14 @@ export {
## to have a valid Teredo encapsulation.
const yielding_teredo_decapsulation = T &redef;
## With this set, the Teredo analyzer waits until it sees both sides
## of a connection using a valid Teredo encapsulation before issuing
## a :bro:see:`protocol_confirmation`. If it's false, the first
## occurrence of a packet with valid Teredo encapsulation causes a
## confirmation. Both cases are still subject to effects of
## :bro:see:`Tunnel::yielding_teredo_decapsulation`.
const delay_teredo_confirmation = T &redef;
## How often to cleanup internal state for inactive IP tunnels.
const ip_tunnel_timeout = 24hrs &redef;
} # end export
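
With the ``&redef`` attributes added above, these thresholds and the Teredo confirmation behavior become tunable from site policy; a sketch with purely illustrative values:

    redef tcp_max_initial_window = 16384;
    redef Tunnel::delay_teredo_confirmation = F;
    redef Tunnel::ip_tunnel_timeout = 12hrs;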

View file

@ -1,3 +1,4 @@
@load ./main
@load ./contents
@load ./inactivity
@load ./polling

View file

@ -17,7 +17,7 @@ export {
type Info: record {
## This is the time of the first packet.
ts: time &log;
## A unique identifier of the connection.
uid: string &log;
## The connection's 4-tuple of endpoint addresses/ports.
id: conn_id &log;
@ -30,7 +30,7 @@ export {
## tear-downs, this will not include the final ACK.
duration: interval &log &optional;
## The number of payload bytes the originator sent. For TCP
## this is taken from sequence numbers and might be inaccurate
## (e.g., due to large connections)
orig_bytes: count &log &optional;
## The number of payload bytes the responder sent. See ``orig_bytes``.
@ -54,16 +54,16 @@ export {
## OTH No SYN seen, just midstream traffic (a "partial connection" that was not later closed).
## ========== ===============================================
conn_state: string &log &optional;
## If the connection is originated locally, this value will be T. If
## it was originated remotely it will be F. In the case that the
## :bro:id:`Site::local_nets` variable is undefined, this field will
## be left empty at all times.
local_orig: bool &log &optional;
## Indicates the number of bytes missed in content gaps, which is
## representative of packet loss. A value other than zero will
## normally cause protocol analysis to fail but some analysis may
## have been completed prior to the packet loss.
missed_bytes: count &log &default=0;
@ -83,25 +83,26 @@ export {
## i inconsistent packet (e.g. SYN+RST bits both set)
## ====== ====================================================
##
## If the event comes from the originator, the letter is in upper-case; if it comes
## from the responder, it's in lower-case. Multiple packets of the same type will
## only be noted once (e.g. we only record one "d" in each direction, regardless of
## how many data packets were seen).
history: string &log &optional;
## Number of packets that the originator sent.
## Only set if :bro:id:`use_conn_size_analyzer` = T
orig_pkts: count &log &optional;
## Number of IP level bytes that the originator sent (as seen on the wire,
## taken from IP total_length header field).
## Only set if :bro:id:`use_conn_size_analyzer` = T
orig_ip_bytes: count &log &optional;
## Number of packets that the responder sent.
## Only set if :bro:id:`use_conn_size_analyzer` = T
resp_pkts: count &log &optional;
## Number of IP level bytes that the responder sent (as seen on the wire,
## taken from IP total_length header field).
## Only set if :bro:id:`use_conn_size_analyzer` = T
resp_ip_bytes: count &log &optional;
## If this connection was over a tunnel, indicate the
## *uid* values for any encapsulating parent connections
## used over the lifetime of this inner connection.
tunnel_parents: set[string] &log;
@ -199,10 +200,10 @@ function set_conn(c: connection, eoc: bool)
c$conn$proto=get_port_transport_proto(c$id$resp_p);
if( |Site::local_nets| > 0 )
c$conn$local_orig=Site::is_local_addr(c$id$orig_h);
if ( eoc )
{
if ( c$duration > 0secs )
{
c$conn$duration=c$duration;
c$conn$orig_bytes=c$orig$size;
@ -218,7 +219,7 @@ function set_conn(c: connection, eoc: bool)
c$conn$resp_ip_bytes = c$resp$num_bytes_ip;
}
local service = determine_service(c);
if ( service != "" )
c$conn$service=service;
c$conn$conn_state=conn_state(c, get_port_transport_proto(c$id$resp_p));
@ -230,7 +231,7 @@ function set_conn(c: connection, eoc: bool)
event content_gap(c: connection, is_orig: bool, seq: count, length: count) &priority=5
{
set_conn(c, F);
c$conn$missed_bytes = c$conn$missed_bytes + length;
}
@ -241,7 +242,7 @@ event tunnel_changed(c: connection, e: EncapsulatingConnVector) &priority=5
add c$conn$tunnel_parents[e[|e|-1]$uid];
c$tunnel = e;
}
event connection_state_remove(c: connection) &priority=5
{
set_conn(c, T);

View file

@ -0,0 +1,49 @@
##! Implements a generic way to poll connections looking for certain features
##! (e.g. monitor bytes transferred). The specific feature of a connection
##! to look for, the polling interval, and the code to execute if the feature
##! is found are all controlled by user-defined callback functions.
module ConnPolling;
export {
## Starts monitoring a given connection.
##
## c: The connection to watch.
##
## callback: A callback function that takes as arguments the monitored
## *connection*, and a counter *cnt* that increments each time the
## callback is called. It returns an interval indicating how long
## in the future to schedule an event which will call the
## callback. A negative return interval causes polling to stop.
##
## cnt: The initial value of a counter which gets passed to *callback*.
##
## i: The initial interval at which to schedule the next callback.
## May be ``0secs`` to poll right away.
global watch: function(c: connection,
callback: function(c: connection, cnt: count): interval,
cnt: count, i: interval);
}
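
A minimal usage sketch (the size threshold and intervals are arbitrary): watch each new connection and stop polling once the originator has sent more than 1 MB, backing off between polls as the GridFTP script below does:

    function size_check(c: connection, cnt: count): interval
        {
        if ( c$orig$size > 1048576 )
            return -1sec;   # a negative interval stops polling
        return 5secs + 1sec * cnt;
        }

    event new_connection(c: connection)
        {
        ConnPolling::watch(c, size_check, 0, 0secs);
        }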
event ConnPolling::check(c: connection,
callback: function(c: connection, cnt: count): interval,
cnt: count)
{
if ( ! connection_exists(c$id) )
return;
lookup_connection(c$id); # updates the conn val
local next_interval = callback(c, cnt);
if ( next_interval < 0secs )
return;
watch(c, callback, cnt + 1, next_interval);
}
function watch(c: connection,
callback: function(c: connection, cnt: count): interval,
cnt: count, i: interval)
{
schedule i { ConnPolling::check(c, callback, cnt) };
}

View file

@ -45,27 +45,29 @@ export {
AA: bool &log &default=F;
## The Truncation bit specifies that the message was truncated.
TC: bool &log &default=F;
## The Recursion Desired bit in a request message indicates that
## the client wants recursive service for this query.
RD: bool &log &default=F;
## The Recursion Available bit in a response message indicates that
## the name server supports recursive queries.
RA: bool &log &default=F;
## A reserved field that is currently supposed to be zero in all
## queries and responses.
Z: count &log &default=0;
## The set of resource descriptions in the query answer.
answers: vector of string &log &optional;
## The caching intervals of the associated RRs described by the
## ``answers`` field.
TTLs: vector of interval &log &optional;
## The DNS query was rejected by the server.
rejected: bool &log &default=F;
## This value indicates if this request/response pair is ready to be
## logged.
ready: bool &default=F;
## The total number of resource records in a reply message's answer
## section.
total_answers: count &default=0;
## The total number of resource records in a reply message's answer,
## authority, and additional sections.
total_replies: count &optional;
@ -162,11 +164,11 @@ function set_session(c: connection, msg: dns_msg, is_query: bool)
c$dns = c$dns_state$pending[msg$id];
if ( ! is_query )
{
c$dns$rcode = msg$rcode;
c$dns$rcode_name = base_errors[msg$rcode];
if ( ! c$dns?$total_answers )
c$dns$total_answers = msg$num_answers;
@ -186,10 +188,13 @@ function set_session(c: connection, msg: dns_msg, is_query: bool)
}
}
event dns_message(c: connection, is_orig: bool, msg: dns_msg, len: count) &priority=5
{
set_session(c, msg, is_orig);
}
event DNS::do_reply(c: connection, msg: dns_msg, ans: dns_answer, reply: string) &priority=5
{
set_session(c, msg, F);
if ( ans$answer_type == DNS_ANS )
{
c$dns$AA = msg$AA;
@ -209,7 +214,8 @@ event DNS::do_reply(c: connection, msg: dns_msg, ans: dns_answer, reply: string)
c$dns$TTLs[|c$dns$TTLs|] = ans$TTL;
}
if ( c$dns?$answers && c$dns?$total_answers &&
|c$dns$answers| == c$dns$total_answers )
{
add c$dns_state$finished_answers[c$dns$trans_id];
# Indicate this request/reply pair is ready to be logged.
@ -230,8 +236,6 @@ event DNS::do_reply(c: connection, msg: dns_msg, ans: dns_answer, reply: string)
event dns_request(c: connection, msg: dns_msg, query: string, qtype: count, qclass: count) &priority=5
{
set_session(c, msg, T);
c$dns$RD = msg$RD;
c$dns$TC = msg$TC;
c$dns$qclass = qclass;
@ -321,11 +325,9 @@ event dns_SRV_reply(c: connection, msg: dns_msg, ans: dns_answer) &priority=5
#
# }
event dns_rejected(c: connection, msg: dns_msg, query: string, qtype: count, qclass: count) &priority=5
{
set_session(c, msg, F);
c$dns$rejected = T;
}
event connection_state_remove(c: connection) &priority=-5

View file

@ -1,3 +1,4 @@
@load ./utils-commands
@load ./main
@load ./file-extract
@load ./gridftp

View file

@ -0,0 +1,121 @@
##! A detection script for GridFTP data and control channels.
##!
##! GridFTP control channels are identified by FTP control channels
##! that successfully negotiate the GSSAPI method of an AUTH request
##! and for which the exchange involved an encoded TLS/SSL handshake,
##! indicating the GSI mechanism for GSSAPI was used. This analysis
##! is all supported internally; this script simply adds the "gridftp"
##! label to the *service* field of the control channel's
##! :bro:type:`connection` record.
##!
##! GridFTP data channels are identified by a heuristic that relies on
##! the fact that default settings for GridFTP clients typically
##! mutually authenticate the data channel with TLS/SSL and negotiate a
##! NULL bulk cipher (no encryption). Connections with those
##! attributes are then polled for two minutes with decreasing frequency
##! to check if the transfer sizes are large enough to indicate a
##! GridFTP data channel that would be undesirable to analyze further
##! (e.g. stop TCP reassembly). A side effect is that true connection
##! sizes are not logged, but with the benefit of saving CPU cycles that
##! otherwise go to analyzing the large (and likely benign) connections.
@load ./main
@load base/protocols/conn
@load base/protocols/ssl
@load base/frameworks/notice
module GridFTP;
export {
## Number of bytes transferred before guessing a connection is a
## GridFTP data channel.
const size_threshold = 1073741824 &redef;
## Max number of times to check whether a connection's size exceeds the
## :bro:see:`GridFTP::size_threshold`.
const max_poll_count = 15 &redef;
## Whether to skip further processing of the GridFTP data channel once
## detected, which may help performance.
const skip_data = T &redef;
## Base amount of time between checking whether a GridFTP data connection
## has transferred more than :bro:see:`GridFTP::size_threshold` bytes.
const poll_interval = 1sec &redef;
## The amount of time the base :bro:see:`GridFTP::poll_interval` is
## increased by each poll interval. Can be used to make more frequent
## checks at the start of a connection and gradually slow down.
const poll_interval_increase = 1sec &redef;
## Raised when a GridFTP data channel is detected.
##
## c: The connection pertaining to the GridFTP data channel.
global data_channel_detected: event(c: connection);
## The initial criteria used to determine whether to start polling
## the connection for the :bro:see:`GridFTP::size_threshold` to have
## been exceeded. This is called in a :bro:see:`ssl_established` event
## handler and by default looks for both a client and server certificate
## and for a NULL bulk cipher. One way in which this function could be
## redefined is to make it also consider client/server certificate issuer
## subjects.
##
## c: The connection which may possibly be a GridFTP data channel.
##
## Returns: true if the connection should be further polled for an
## exceeded :bro:see:`GridFTP::size_threshold`, else false.
const data_channel_initial_criteria: function(c: connection): bool &redef;
}
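
As the documentation above suggests, the initial criteria can be redefined from site policy, e.g. to additionally require a particular certificate issuer (the issuer pattern here is purely hypothetical):

    redef GridFTP::data_channel_initial_criteria = function(c: connection): bool
        {
        return ( c?$ssl && c$ssl?$client_subject && c$ssl?$subject &&
                 c$ssl?$cipher && /WITH_NULL/ in c$ssl$cipher &&
                 c$ssl?$issuer_subject && /Grid/ in c$ssl$issuer_subject );
        };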
redef record FTP::Info += {
last_auth_requested: string &optional;
};
event ftp_request(c: connection, command: string, arg: string) &priority=4
{
if ( command == "AUTH" && c?$ftp )
c$ftp$last_auth_requested = arg;
}
function size_callback(c: connection, cnt: count): interval
{
if ( c$orig$size > size_threshold || c$resp$size > size_threshold )
{
add c$service["gridftp-data"];
event GridFTP::data_channel_detected(c);
if ( skip_data )
skip_further_processing(c$id);
return -1sec;
}
if ( cnt >= max_poll_count )
return -1sec;
return poll_interval + poll_interval_increase * cnt;
}
event ssl_established(c: connection) &priority=5
{
# If an FTP client requests AUTH GSSAPI and later an SSL handshake
# finishes, it's likely a GridFTP control channel, so add service label.
if ( c?$ftp && c$ftp?$last_auth_requested &&
/GSSAPI/ in c$ftp$last_auth_requested )
add c$service["gridftp"];
}
function data_channel_initial_criteria(c: connection): bool
{
return ( c?$ssl && c$ssl?$client_subject && c$ssl?$subject &&
c$ssl?$cipher && /WITH_NULL/ in c$ssl$cipher );
}
event ssl_established(c: connection) &priority=-3
{
# By default GridFTP data channels do mutual authentication and
# negotiate a cipher suite with a NULL bulk cipher.
if ( data_channel_initial_criteria(c) )
ConnPolling::watch(c, size_callback, 0, 0secs);
}

View file

@ -28,7 +28,9 @@ export {
type Info: record {
## Time when the command was sent.
ts: time &log;
## Unique ID for the connection.
uid: string &log;
## The connection's 4-tuple of endpoint addresses/ports.
id: conn_id &log;
## User name for the current FTP session.
user: string &log &default="<unknown>";
@ -94,11 +96,11 @@ redef record connection += {
};
# Configure DPD
const ports = { 21/tcp, 2811/tcp } &redef; # 2811/tcp is GridFTP.
redef capture_filters += { ["ftp"] = "port 21 or port 2811" };
redef dpd_config += { [ANALYZER_FTP] = [$ports = ports] };
redef likely_server_ports += { 21/tcp, 2811/tcp };
# Establish the variable for tracking expected connections.
global ftp_data_expected: table[addr, port] of Info &create_expire=5mins;

View file

@ -22,7 +22,9 @@ export {
type Info: record {
## Timestamp for when the request happened.
ts: time &log;
## Unique ID for the connection.
uid: string &log;
## The connection's 4-tuple of endpoint addresses/ports.
id: conn_id &log;
## Represents the pipelined depth into the connection of this
## request/response transaction.
@ -112,7 +114,7 @@ event bro_init() &priority=5
# DPD configuration.
const ports = {
80/tcp, 81/tcp, 631/tcp, 1080/tcp, 3128/tcp,
8000/tcp, 8080/tcp, 8888/tcp,
};
redef dpd_config += {

View file

@ -11,7 +11,9 @@ export {
type Info: record {
## Timestamp when the command was seen.
ts: time &log;
## Unique ID for the connection.
uid: string &log;
## The connection's 4-tuple of endpoint addresses/ports.
id: conn_id &log;
## Nick name given for the connection.
nick: string &log &optional;

View file

@ -8,33 +8,51 @@ export {
redef enum Log::ID += { LOG };
type Info: record {
## Time when the message was first seen.
ts: time &log;
## Unique ID for the connection.
uid: string &log;
## The connection's 4-tuple of endpoint addresses/ports.
id: conn_id &log;
## A count to represent the depth of this message transaction in a single
## connection where multiple messages were transferred.
trans_depth: count &log;
## Contents of the Helo header.
helo: string &log &optional;
## Contents of the MAIL FROM command.
mailfrom: string &log &optional;
## Contents of the RCPT TO command(s).
rcptto: set[string] &log &optional;
## Contents of the Date header.
date: string &log &optional;
## Contents of the From header.
from: string &log &optional;
## Contents of the To header.
to: set[string] &log &optional;
## Contents of the Reply-To header.
reply_to: string &log &optional;
## Contents of the Message-ID header.
msg_id: string &log &optional;
## Contents of the In-Reply-To header.
in_reply_to: string &log &optional;
## Contents of the Subject header.
subject: string &log &optional;
## Contents of the X-Originating-IP header.
x_originating_ip: addr &log &optional;
## Contents of the first Received header.
first_received: string &log &optional;
## Contents of the second Received header.
second_received: string &log &optional;
## The last message that the server sent to the client.
last_reply: string &log &optional;
## The message transmission path, as extracted from the headers.
path: vector of addr &log &optional;
## Value of the User-Agent header from the client.
user_agent: string &log &optional;
## Indicates if the "Received: from" headers should still be processed.
process_received_from: bool &default=T;
## Indicates if client activity has been seen, but not yet logged.
has_client_activity: bool &default=F;
};

View file

@ -9,19 +9,21 @@ export {
type Info: record {
## Time when the proxy connection was first detected.
ts: time &log;
## Unique ID for the tunnel - may correspond to connection uid or be non-existent.
uid: string &log;
## The connection's 4-tuple of endpoint addresses/ports.
id: conn_id &log;
## Protocol version of SOCKS.
version: count &log;
## Username for the proxy if extracted from the network.
user: string &log &optional;
## Server status for the attempt at using the proxy.
status: string &log &optional;
## Client requested SOCKS address. Could be an address, a name or both.
request: SOCKS::Address &log &optional;
## Client requested port.
request_p: port &log &optional;
## Server bound address. Could be an address, a name or both.
bound: SOCKS::Address &log &optional;
## Server bound port.
bound_p: port &log &optional;

View file

@ -26,21 +26,23 @@ export {
type Info: record {
## Time when the SSH connection began.
ts: time &log;
## Unique ID for the connection.
uid: string &log;
## The connection's 4-tuple of endpoint addresses/ports.
id: conn_id &log;
## Indicates if the login was heuristically guessed to be "success"
## or "failure".
status: string &log &optional;
## Direction of the connection. If the client was a local host
## logging into an external host, this would be OUTBOUND. INBOUND
## would be set for the opposite situation.
# TODO: handle local-local and remote-remote better.
direction: Direction &log &optional;
## Software string from the client.
client: string &log &optional;
## Software string from the server.
server: string &log &optional;
## Amount of data returned from the server. This is currently
## the only measure of the success heuristic and it is logged to
## assist analysts looking at the logs to make their own determination
## about the success on a case-by-case basis.

View file

@ -9,15 +9,17 @@ export {
redef enum Log::ID += { LOG };
type Info: record {
## Time when the SSL connection was first detected.
ts: time &log;
## Unique ID for the connection.
uid: string &log;
## The connection's 4-tuple of endpoint addresses/ports.
id: conn_id &log;
## SSL/TLS version that the server offered.
version: string &log &optional;
## SSL/TLS cipher suite that the server chose.
cipher: string &log &optional;
## Value of the Server Name Indicator SSL/TLS extension. It
## indicates the server name that the client was requesting.
server_name: string &log &optional;
## Session ID offered by the client for session resumption.
@ -28,37 +30,48 @@ export {
issuer_subject: string &log &optional;
## NotValidBefore field value from the server certificate.
not_valid_before: time &log &optional;
## NotValidAfter field value from the server certificate.
not_valid_after: time &log &optional;
## Last alert that was seen during the connection.
last_alert: string &log &optional;
## Subject of the X.509 certificate offered by the client.
client_subject: string &log &optional;
## Subject of the signer of the X.509 certificate offered by the client.
client_issuer_subject: string &log &optional;
## Full binary server certificate stored in DER format.
cert: string &optional;
## Chain of certificates offered by the server to validate its
## complete signing chain.
cert_chain: vector of string &optional;
## Full binary client certificate stored in DER format.
client_cert: string &optional;
## Chain of certificates offered by the client to validate its
## complete signing chain.
client_cert_chain: vector of string &optional;
## The analyzer ID used for the analyzer instance attached
## to each connection. It is not used for logging since it's a
## meaningless arbitrary number.
analyzer_id: count &optional;
};
## The default root CA bundle. By loading the
## mozilla-ca-list.bro script it will be set to Mozilla's root CA list.
const root_certs: table[string] of string = {} &redef;
## If true, detach the SSL analyzer from the connection to prevent
## continuing to process encrypted traffic. Helps with performance
## (especially with large file transfers).
const disable_analyzer_after_detection = T &redef;
## The openssl command line utility. If it's in the path the default
## value will work, otherwise a full path string can be supplied for the
## utility.
const openssl_util = "openssl" &redef;
## Event that can be handled to access the SSL
## record as it is sent on to the logging framework.
global log_ssl: event(rec: Info);
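
The ``log_ssl`` event gives access to each record as it is written; a small sketch:

    event SSL::log_ssl(rec: SSL::Info)
        {
        if ( rec?$server_name )
            print fmt("SSL/TLS connection to %s", rec$server_name);
        }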
@ -105,7 +118,8 @@ redef likely_server_ports += {
function set_session(c: connection)
{
if ( ! c?$ssl )
c$ssl = [$ts=network_time(), $uid=c$uid, $id=c$id, $cert_chain=vector(),
$client_cert_chain=vector()];
}
function finish(c: connection)
@ -139,23 +153,40 @@ event x509_certificate(c: connection, is_orig: bool, cert: X509, chain_idx: coun
if ( is_orig )
{
if ( chain_idx == 0 )
{
# Save the primary cert.
c$ssl$client_cert = der_cert;
# Also save other certificate information about the primary cert.
c$ssl$client_subject = cert$subject;
c$ssl$client_issuer_subject = cert$issuer;
}
else
{
# Otherwise, add it to the cert validation chain.
c$ssl$client_cert_chain[|c$ssl$client_cert_chain|] = der_cert;
}
}
else
{
if ( chain_idx == 0 )
{
# Save the primary cert.
c$ssl$cert = der_cert;
# Also save other certificate information about the primary cert.
c$ssl$subject = cert$subject;
c$ssl$issuer_subject = cert$issuer;
c$ssl$not_valid_before = cert$not_valid_before;
c$ssl$not_valid_after = cert$not_valid_after;
}
else
{
# Otherwise, add it to the cert validation chain.
c$ssl$cert_chain[|c$ssl$cert_chain|] = der_cert;
}
}
}

File diff suppressed because one or more lines are too long

View file

@ -9,9 +9,11 @@ export {
redef enum Log::ID += { LOG };
type Info: record {
## Timestamp when the syslog message was seen.
ts: time &log;
## Unique ID for the connection.
uid: string &log;
## The connection's 4-tuple of endpoint addresses/ports.
id: conn_id &log;
## Protocol over which the message was seen.
proto: transport_proto &log;