Merge remote-tracking branch 'origin/master' into topic/bernhard/sqlite

Conflicts:
	scripts/base/frameworks/logging/__load__.bro
	src/CMakeLists.txt
	src/logging.bif
	src/types.bif
Bernhard Amann 2012-07-25 15:04:23 -07:00
commit da157c8ded
296 changed files with 4703 additions and 2175 deletions


@ -10,14 +10,14 @@ export {
## The communication logging stream identifier.
redef enum Log::ID += { LOG };
## Which interface to listen on. The addresses ``0.0.0.0`` and ``[::]``
## are wildcards.
const listen_interface = 0.0.0.0 &redef;
## Which port to listen on.
const listen_port = 47757/tcp &redef;
## This defines if a listening socket should use SSL.
const listen_ssl = F &redef;
@ -34,7 +34,7 @@ export {
## :bro:id:`Communication::listen_port` if it's already in use.
const listen_retry = 30 secs &redef;
## Default compression level. Compression level is 0-9, with 0 = no
## compression.
global compression_level = 0 &redef;
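These options all carry &redef, so a site can override the listening defaults from its local policy. A minimal sketch (the values below are arbitrary placeholders, not recommendations):

redef Communication::listen_interface = 127.0.0.1;
redef Communication::listen_port = 47756/tcp;
redef Communication::listen_ssl = T;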
@ -42,7 +42,7 @@ export {
type Info: record {
## The network time at which a communication event occurred.
ts: time &log;
## The peer name (if any) with which a communication event is concerned.
peer: string &log &optional;
## Where the communication event message originated from, that is,
## either from the scripting layer or inside the Bro process.
@ -70,7 +70,7 @@ export {
## If the *host* field is a non-global IPv6 address, this field
## can specify a particular :rfc:`4007` ``zone_id``.
zone_id: string &optional;
## Port of the remote Bro communication endpoint if we are initiating
## the connection based on the :bro:id:`connect` field.
p: port &optional;
@ -120,7 +120,7 @@ export {
## The remote peer.
peer: event_peer &optional;
## Indicates the status of the node.
connected: bool &default = F;
};
@ -163,7 +163,7 @@ event bro_init() &priority=5
function do_script_log_common(level: count, src: count, msg: string)
{
Log::write(Communication::LOG, [$ts = network_time(),
$level = (level == REMOTE_LOG_INFO ? "info" : "error"),
$src_name = src_names[src],
$peer = get_event_peer()$descr,
@ -199,9 +199,9 @@ function connect_peer(peer: string)
local class = node?$class ? node$class : "";
local zone_id = node?$zone_id ? node$zone_id : "";
local id = connect(node$host, zone_id, p, class, node$retry, node$ssl);
if ( id == PEER_ID_NONE )
Log::write(Communication::LOG, [$ts = network_time(),
$peer = get_event_peer()$descr,
$message = "can't trigger connect"]);
pending_peers[id] = node;
@ -340,7 +340,7 @@ event bro_init() &priority = -10 # let others modify nodes
{
if ( |nodes| > 0 )
enable_communication();
for ( tag in nodes )
{
if ( ! nodes[tag]$connect )
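For reference, a hedged sketch of what feeds the loop above: an entry added to Communication::nodes before bro_init runs. The peer name and address are hypothetical; the fields used ($host, $p, $connect, $retry) are the ones referenced in the excerpts above.

redef Communication::nodes += {
	["collector"] = [$host=192.168.1.10, $p=47757/tcp, $connect=T, $retry=30secs]
};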


@ -55,7 +55,8 @@ export {
pred: function(typ: Input::Event, left: any, right: any): bool &optional;
## A key/value table that will be passed on to the reader.
## Interpretation of the values is left to the reader, but
## usually they will be used for configuration purposes.
config: table[string] of string &default=table();
};
@ -90,8 +91,9 @@ export {
ev: any;
## A key/value table that will be passed on to the reader.
## Interpretation of the values is left to the reader, but
## usually they will be used for configuration purposes.
config: table[string] of string &default=table();
};
## Create a new table input from a given source. Returns true on success.
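As a usage illustration for the config field documented above, a minimal sketch of a table input. The input file name, the Idx/Val records, and the "read_timeout" key are hypothetical placeholders; which config keys are honored is up to the individual reader.

type BLIdx: record { ip: addr; };
type BLVal: record { reason: string; };

global blacklist_hosts: table[addr] of BLVal = table();

event bro_init()
	{
	# Read blacklist.file into blacklist_hosts, handing the reader one
	# (hypothetical) configuration value via the config table.
	Input::add_table([$source="blacklist.file", $name="blacklist",
	                  $idx=BLIdx, $val=BLVal, $destination=blacklist_hosts,
	                  $config=table(["read_timeout"] = "10")]);
	}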


@ -3,4 +3,5 @@
@load ./writers/ascii
@load ./writers/dataseries
@load ./writers/sqlite
@load ./writers/elasticsearch
@load ./writers/none


@ -139,9 +139,10 @@ export {
## default comes out of :bro:id:`Log::default_rotation_postprocessors`.
postprocessor: function(info: RotationInfo) : bool &optional;
## A key/value table that will be passed on to the writer.
## Interpretation of the values is left to the writer, but
## usually they will be used for configuration purposes.
config: table[string] of string &default=table();
};
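As an illustration of the config field above, a hedged sketch that routes the connection log through the SQLite writer this branch adds and hands it one writer-specific option. The "tablename" key and the path are assumptions about that writer's configuration, not documented API.

event bro_init()
	{
	Log::add_filter(Conn::LOG, [$name="sqlite-conn",
	                            $writer=Log::WRITER_SQLITE,
	                            $path="/var/db/conn",
	                            $config=table(["tablename"] = "conn")]);
	}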
## Sentinel value for indicating that a filter was not found when looked up.


@ -8,12 +8,13 @@ export {
## into files. This is primarily for debugging purposes.
const output_to_stdout = F &redef;
## If true, include lines with log meta information such as column names with
## types, the values of ASCII logging options that are in use, and the time
## when the file was opened and closed (the latter at the end).
const include_meta = T &redef;
## Prefix for lines with meta information.
const meta_prefix = "#" &redef;
## Separator between fields.
const separator = "\t" &redef;
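All of these ASCII options carry &redef, so they can be adjusted from site policy; a minimal sketch (assuming the module name LogAscii used by the shipped writer):

redef LogAscii::include_meta = F;
redef LogAscii::separator = "|";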


@ -0,0 +1,46 @@
##! Log writer for sending logs to an ElasticSearch server.
##!
##! Note: This module is in testing and is not yet considered stable!
##!
##! There is one known memory issue: if your ElasticSearch server is
##! running slowly and takes too long to return from bulk insert
##! requests, the message queue to the writer thread will keep
##! growing, giving the appearance of a memory leak.
module LogElasticSearch;
export {
## Name of the ES cluster
const cluster_name = "elasticsearch" &redef;
## ES Server
const server_host = "127.0.0.1" &redef;
## ES Port
const server_port = 9200 &redef;
## Name of the ES index
const index_prefix = "bro" &redef;
## The ES type prefix comes before the name of the related log.
## e.g. prefix = "bro_" would create types of bro_dns, bro_software, etc.
const type_prefix = "" &redef;
## The time before an ElasticSearch transfer will time out.
## Note: This is currently not working!
const transfer_timeout = 2secs;
## The batch size is the number of messages that will be queued up before
## they are sent to be bulk indexed.
const max_batch_size = 1000 &redef;
## The maximum amount of wall-clock time that is allowed to pass without
## finishing a bulk log send. This represents the maximum delay you
## would like to have with your logs before they are sent to ElasticSearch.
const max_batch_interval = 1min &redef;
## The maximum byte size for a buffered JSON string to send to the bulk
## insert API.
const max_byte_size = 1024 * 1024 &redef;
}
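A hedged sketch of pointing the writer at a remote node and sending a single stream to it. It assumes the writer registers itself as Log::WRITER_ELASTICSEARCH, following the naming of the other shipped writers; the host name is a placeholder.

redef LogElasticSearch::server_host = "es.example.com";
redef LogElasticSearch::max_batch_size = 5000;

event bro_init()
	{
	Log::add_filter(HTTP::LOG, [$name="es-http",
	                            $writer=Log::WRITER_ELASTICSEARCH]);
	}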


@ -3,8 +3,8 @@
module LogNone;
export {
## If true, output debugging information that can be useful for unit
## testing the logging framework.
const debug = F &redef;
}


@ -115,6 +115,61 @@ type icmp_context: record {
DF: bool; ##< True if the packet's *don't fragment* flag is set.
};
## Values extracted from a Prefix Information option in an ICMPv6 neighbor
## discovery message as specified by :rfc:`4861`.
##
## .. bro:see:: icmp6_nd_option
type icmp6_nd_prefix_info: record {
## Number of leading bits of the *prefix* that are valid.
prefix_len: count;
## Flag indicating the prefix can be used for on-link determination.
L_flag: bool;
## Autonomous address-configuration flag.
A_flag: bool;
## Length of time in seconds that the prefix is valid for the purpose of
## on-link determination (0xffffffff represents infinity).
valid_lifetime: interval;
## Length of time in seconds that the addresses generated from the prefix
## via stateless address autoconfiguration remain preferred
## (0xffffffff represents infinity).
preferred_lifetime: interval;
## An IP address or prefix of an IP address. Use the *prefix_len* field
## to convert this into a :bro:type:`subnet`.
prefix: addr;
};
## Options extracted from ICMPv6 neighbor discovery messages as specified
## by :rfc:`4861`.
##
## .. bro:see:: icmp_router_solicitation icmp_router_advertisement
## icmp_neighbor_advertisement icmp_neighbor_solicitation icmp_redirect
## icmp6_nd_options
type icmp6_nd_option: record {
## 8-bit identifier of the type of option.
otype: count;
## 8-bit integer representing the length of the option (including the type
## and length fields) in units of 8 octets.
len: count;
## Source Link-Layer Address (Type 1) or Target Link-Layer Address (Type 2).
## Byte ordering of this is dependent on the actual link-layer.
link_address: string &optional;
## Prefix Information (Type 3).
prefix: icmp6_nd_prefix_info &optional;
## Redirected header (Type 4). This field contains the context of the
## original, redirected packet.
redirect: icmp_context &optional;
## Recommended MTU for the link (Type 5).
mtu: count &optional;
## The raw data of the option (everything after type & length fields),
## useful for unknown option types or when the full option payload is
## truncated in the captured packet. In those cases, option fields
## won't be pre-extracted into the fields above.
payload: string &optional;
};
## A type alias for a vector of ICMPv6 neighbor discovery message options.
type icmp6_nd_options: vector of icmp6_nd_option;
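A minimal sketch of walking these records, using only the fields defined above; the function name is hypothetical and it would be called with the options vector delivered by an ICMPv6 neighbor-discovery event.

function describe_nd_options(opts: icmp6_nd_options)
	{
	for ( i in opts )
		{
		local opt = opts[i];
		if ( opt?$prefix )
			print fmt("prefix %s/%d, valid for %s",
			          opt$prefix$prefix, opt$prefix$prefix_len,
			          opt$prefix$valid_lifetime);
		else if ( opt?$mtu )
			print fmt("link MTU %d", opt$mtu);
		else
			print fmt("option type %d, length %d (in units of 8 octets)",
			          opt$otype, opt$len);
		}
	}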
# A DNS mapping between IP address and hostname resolved by Bro's internal
# resolver.
#


@ -17,7 +17,7 @@ export {
type Info: record {
## This is the time of the first packet.
ts: time &log;
## A unique identifier of the connection.
uid: string &log;
## The connection's 4-tuple of endpoint addresses/ports.
id: conn_id &log;
@ -30,7 +30,7 @@ export {
## tear-downs, this will not include the final ACK.
duration: interval &log &optional;
## The number of payload bytes the originator sent. For TCP
## this is taken from sequence numbers and might be inaccurate
## (e.g., due to large connections)
orig_bytes: count &log &optional;
## The number of payload bytes the responder sent. See ``orig_bytes``.
@ -54,16 +54,16 @@ export {
## OTH No SYN seen, just midstream traffic (a "partial connection" that was not later closed).
## ========== ===============================================
conn_state: string &log &optional;
## If the connection is originated locally, this value will be T. If
## it was originated remotely it will be F. In the case that the
## :bro:id:`Site::local_nets` variable is undefined, this field will
## be left empty at all times.
local_orig: bool &log &optional;
## Indicates the number of bytes missed in content gaps, which is
## representative of packet loss. A value other than zero will
## normally cause protocol analysis to fail but some analysis may
## have been completed prior to the packet loss.
missed_bytes: count &log &default=0;
@ -83,25 +83,26 @@ export {
## i inconsistent packet (e.g. SYN+RST bits both set)
## ====== ====================================================
##
## If the event comes from the originator, the letter is in upper-case; if it comes
## from the responder, it's in lower-case. Multiple packets of the same type will
## only be noted once (e.g. we only record one "d" in each direction, regardless of
## how many data packets were seen).
history: string &log &optional;
## Number of packets that the originator sent.
## Only set if :bro:id:`use_conn_size_analyzer` = T
orig_pkts: count &log &optional;
## Number of IP level bytes that the originator sent (as seen on the wire,
## taken from IP total_length header field).
## Only set if :bro:id:`use_conn_size_analyzer` = T
orig_ip_bytes: count &log &optional;
## Number of packets that the responder sent.
## Only set if :bro:id:`use_conn_size_analyzer` = T
resp_pkts: count &log &optional;
## Number of IP level bytes that the responder sent (as seen on the wire,
## taken from IP total_length header field).
## Only set if :bro:id:`use_conn_size_analyzer` = T
resp_ip_bytes: count &log &optional;
## If this connection was over a tunnel, indicate the
## *uid* values for any encapsulating parent connections
## used over the lifetime of this inner connection.
tunnel_parents: set[string] &log;
@ -199,10 +200,10 @@ function set_conn(c: connection, eoc: bool)
c$conn$proto=get_port_transport_proto(c$id$resp_p);
if( |Site::local_nets| > 0 )
c$conn$local_orig=Site::is_local_addr(c$id$orig_h);
if ( eoc )
{
if ( c$duration > 0secs )
{
c$conn$duration=c$duration;
c$conn$orig_bytes=c$orig$size;
@ -218,7 +219,7 @@ function set_conn(c: connection, eoc: bool)
c$conn$resp_ip_bytes = c$resp$num_bytes_ip;
}
local service = determine_service(c);
if ( service != "" )
c$conn$service=service;
c$conn$conn_state=conn_state(c, get_port_transport_proto(c$id$resp_p));
@ -230,7 +231,7 @@ function set_conn(c: connection, eoc: bool)
event content_gap(c: connection, is_orig: bool, seq: count, length: count) &priority=5
{
set_conn(c, F);
c$conn$missed_bytes = c$conn$missed_bytes + length;
}
@ -241,7 +242,7 @@ event tunnel_changed(c: connection, e: EncapsulatingConnVector) &priority=5
add c$conn$tunnel_parents[e[|e|-1]$uid];
c$tunnel = e;
}
event connection_state_remove(c: connection) &priority=5
{
set_conn(c, T);
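The handler above fills in c$conn at priority 5, so a lower-priority handler can read the finished record; a minimal sketch that reports connections with content gaps (the message is arbitrary):

event connection_state_remove(c: connection) &priority=-5
	{
	if ( c?$conn && c$conn$missed_bytes > 0 )
		print fmt("%s lost %d payload bytes to content gaps",
		          c$conn$uid, c$conn$missed_bytes);
	}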


@ -45,16 +45,16 @@ export {
AA: bool &log &default=F;
## The Truncation bit specifies that the message was truncated.
TC: bool &log &default=F;
## The Recursion Desired bit in a request message indicates that
## the client wants recursive service for this query.
RD: bool &log &default=F;
## The Recursion Available bit in a response message indicates that
## the name server supports recursive queries.
RA: bool &log &default=F;
## A reserved field that is currently supposed to be zero in all
## queries and responses.
Z: count &log &default=0;
## The set of resource descriptions in the query answer.
answers: vector of string &log &optional;
## The caching intervals of the associated RRs described by the
## ``answers`` field.
@ -162,11 +162,11 @@ function set_session(c: connection, msg: dns_msg, is_query: bool)
c$dns = c$dns_state$pending[msg$id];
if ( ! is_query )
{
c$dns$rcode = msg$rcode;
c$dns$rcode_name = base_errors[msg$rcode];
if ( ! c$dns?$total_answers )
c$dns$total_answers = msg$num_answers;


@ -28,7 +28,9 @@ export {
type Info: record {
## Time when the command was sent.
ts: time &log;
## Unique ID for the connection.
uid: string &log;
## The connection's 4-tuple of endpoint addresses/ports.
id: conn_id &log;
## User name for the current FTP session.
user: string &log &default="<unknown>";


@ -22,7 +22,9 @@ export {
type Info: record {
## Timestamp for when the request happened.
ts: time &log;
## Unique ID for the connection.
uid: string &log;
## The connection's 4-tuple of endpoint addresses/ports.
id: conn_id &log;
## Represents the pipelined depth into the connection of this
## request/response transaction.
@ -112,7 +114,7 @@ event bro_init() &priority=5
# DPD configuration.
const ports = {
80/tcp, 81/tcp, 631/tcp, 1080/tcp, 3128/tcp,
8000/tcp, 8080/tcp, 8888/tcp,
};
redef dpd_config += {


@ -11,7 +11,9 @@ export {
type Info: record {
## Timestamp when the command was seen.
ts: time &log;
## Unique ID for the connection.
uid: string &log;
## The connection's 4-tuple of endpoint addresses/ports.
id: conn_id &log;
## Nick name given for the connection.
nick: string &log &optional;


@ -8,33 +8,51 @@ export {
redef enum Log::ID += { LOG };
type Info: record {
## Time when the message was first seen.
ts: time &log;
## Unique ID for the connection.
uid: string &log;
## The connection's 4-tuple of endpoint addresses/ports.
id: conn_id &log;
## A count to represent the depth of this message transaction in a single
## connection where multiple messages were transferred.
trans_depth: count &log;
## Contents of the Helo header.
helo: string &log &optional;
## Email address supplied in the MAIL FROM command.
mailfrom: string &log &optional;
## Email addresses supplied in RCPT TO commands.
rcptto: set[string] &log &optional;
## Contents of the Date header.
date: string &log &optional;
## Contents of the From header.
from: string &log &optional;
## Contents of the To header.
to: set[string] &log &optional;
## Contents of the Reply-To header.
reply_to: string &log &optional;
## Contents of the Message-ID header.
msg_id: string &log &optional;
## Contents of the In-Reply-To header.
in_reply_to: string &log &optional;
## Contents of the Subject header.
subject: string &log &optional;
## Contents of the X-Originating-IP header.
x_originating_ip: addr &log &optional;
## Contents of the first Received header.
first_received: string &log &optional;
## Contents of the second Received header.
second_received: string &log &optional;
## The last message that the server sent to the client.
last_reply: string &log &optional;
## The message transmission path, as extracted from the headers.
path: vector of addr &log &optional;
## Value of the User-Agent header from the client.
user_agent: string &log &optional;
## Indicates if the "Received: from" headers should still be processed.
process_received_from: bool &default=T;
## Indicates if client activity has been seen, but not yet logged.
has_client_activity: bool &default=F;
};
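A hedged usage sketch: reacting to finished smtp.log entries through the stream's log event. It assumes the module defines the conventional SMTP::log_smtp event; the subject pattern is an arbitrary example.

event SMTP::log_smtp(rec: SMTP::Info)
	{
	if ( rec?$subject && /[Pp]assword/ in rec$subject )
		print fmt("suspicious subject from %s: %s",
		          rec$id$orig_h, rec$subject);
	}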


@ -9,19 +9,21 @@ export {
type Info: record {
## Time when the proxy connection was first detected.
ts: time &log;
## Unique ID for the tunnel - may correspond to connection uid or be non-existent.
uid: string &log;
## The connection's 4-tuple of endpoint addresses/ports.
id: conn_id &log;
## Protocol version of SOCKS.
version: count &log;
## Username for the proxy if extracted from the network.
user: string &log &optional;
## Server status for the attempt at using the proxy.
status: string &log &optional;
## Client requested SOCKS address. Could be an address, a name or both.
request: SOCKS::Address &log &optional;
## Client requested port.
request_p: port &log &optional;
## Server bound address. Could be an address, a name or both.
bound: SOCKS::Address &log &optional;
## Server bound port.
bound_p: port &log &optional;
@ -83,5 +85,8 @@ event socks_reply(c: connection, version: count, reply: count, sa: SOCKS::Addres
event socks_reply(c: connection, version: count, reply: count, sa: SOCKS::Address, p: port) &priority=-5
{
# This will handle the case where the analyzer failed in some way and was removed. We probably
# don't want to log these connections.
if ( "SOCKS" in c$service )
Log::write(SOCKS::LOG, c$socks);
}


@ -26,21 +26,23 @@ export {
type Info: record {
## Time when the SSH connection began.
ts: time &log;
## Unique ID for the connection.
uid: string &log;
## The connection's 4-tuple of endpoint addresses/ports.
id: conn_id &log;
## Indicates if the login was heuristically guessed to be "success"
## or "failure".
status: string &log &optional;
## Direction of the connection. If the client was a local host
## logging into an external host, this would be OUTBOUND. INBOUND
## would be set for the opposite situation.
# TODO: handle local-local and remote-remote better.
direction: Direction &log &optional;
## Software string from the client.
client: string &log &optional;
## Software string from the server.
server: string &log &optional;
## Amount of data returned from the server. This is currently
## the only measure of the success heuristic and it is logged to
## assist analysts looking at the logs to make their own determination
## about the success on a case-by-case basis.
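A hedged sketch along the same lines for SSH; it assumes the conventional SSH::log_ssh event and the "failure" status string produced by the heuristic, both of which should be checked against the actual script.

event SSH::log_ssh(rec: SSH::Info)
	{
	if ( rec?$status && rec$status == "failure" &&
	     rec?$direction && rec$direction == INBOUND )
		print fmt("possible failed inbound SSH login from %s", rec$id$orig_h);
	}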


@ -9,13 +9,15 @@ export {
redef enum Log::ID += { LOG };
type Info: record {
## Time when the SSL connection was first detected.
ts: time &log;
## Unique ID for the connection.
uid: string &log;
## The connection's 4-tuple of endpoint addresses/ports.
id: conn_id &log;
## SSL/TLS version that the server offered.
version: string &log &optional;
## SSL/TLS cipher suite that the server chose.
cipher: string &log &optional;
## Value of the Server Name Indicator SSL/TLS extension. It
## indicates the server name that the client was requesting.

File diff suppressed because one or more lines are too long


@ -9,9 +9,11 @@ export {
redef enum Log::ID += { LOG };
type Info: record {
## Timestamp when the syslog message was seen.
ts: time &log;
## Unique ID for the connection.
uid: string &log;
## The connection's 4-tuple of endpoint addresses/ports.
id: conn_id &log;
## Protocol over which the message was seen.
proto: transport_proto &log;