Merge remote-tracking branch 'origin/master' into topic/dnthayer/alarms-mail

Daniel Thayer 2012-10-30 11:32:58 -05:00
commit 0f97f0b6e4
618 changed files with 11183 additions and 2057 deletions

View file

@@ -10,14 +10,14 @@ export {
## The communication logging stream identifier.
redef enum Log::ID += { LOG };
## Which interface to listen on. The addresses ``0.0.0.0`` and ``[::]``
## are wildcards.
const listen_interface = 0.0.0.0 &redef;
## Which port to listen on.
const listen_port = 47757/tcp &redef;
## This defines if a listening socket should use SSL.
const listen_ssl = F &redef;
@@ -34,7 +34,7 @@ export {
## :bro:id:`Communication::listen_port` if it's already in use.
const listen_retry = 30 secs &redef;
## Default compression level. Compression level is 0-9, with 0 = no
## compression.
global compression_level = 0 &redef;
@@ -42,7 +42,7 @@ export {
type Info: record {
## The network time at which a communication event occurred.
ts: time &log;
## The peer name (if any) with which a communication event is concerned.
peer: string &log &optional;
## Where the communication event message originated from, that is,
## either from the scripting layer or inside the Bro process.
@@ -70,7 +70,7 @@ export {
## If the *host* field is a non-global IPv6 address, this field
## can specify a particular :rfc:`4007` ``zone_id``.
zone_id: string &optional;
## Port of the remote Bro communication endpoint if we are initiating
## the connection based on the :bro:id:`connect` field.
p: port &optional;
@@ -120,7 +120,7 @@ export {
## The remote peer.
peer: event_peer &optional;
## Indicates the status of the node.
connected: bool &default = F;
};
@@ -163,7 +163,7 @@ event bro_init() &priority=5
function do_script_log_common(level: count, src: count, msg: string)
{
Log::write(Communication::LOG, [$ts = network_time(),
$level = (level == REMOTE_LOG_INFO ? "info" : "error"),
$src_name = src_names[src],
$peer = get_event_peer()$descr,
@@ -199,9 +199,9 @@ function connect_peer(peer: string)
local class = node?$class ? node$class : "";
local zone_id = node?$zone_id ? node$zone_id : "";
local id = connect(node$host, zone_id, p, class, node$retry, node$ssl);
if ( id == PEER_ID_NONE )
Log::write(Communication::LOG, [$ts = network_time(),
$peer = get_event_peer()$descr,
$message = "can't trigger connect"]);
pending_peers[id] = node;
@@ -340,7 +340,7 @@ event bro_init() &priority = -10 # let others modify nodes
{
if ( |nodes| > 0 )
enable_communication();
for ( tag in nodes )
{
if ( ! nodes[tag]$connect )
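For context, a minimal sketch of wiring up the communication settings above from site policy, e.g. local.bro; the node name, host, and port here are illustrative, not from this commit:

    redef Communication::listen_port = 47757/tcp;

    redef Communication::nodes += {
        # Actively connect out to one peer and retry on failure.
        ["peer-1"] = [$host=10.0.0.2, $p=47757/tcp, $connect=T, $retry=30secs]
    };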

View file

@@ -8,8 +8,16 @@ export {
## The default input reader used. Defaults to `READER_ASCII`.
const default_reader = READER_ASCII &redef;
## The default reader mode used. Defaults to `MANUAL`.
const default_mode = MANUAL &redef;
## Flag that controls if the input framework accepts records
## that contain types that are not supported (at the moment
## file and function). If true, the input framework will
## warn in these cases, but continue. If false, it will
## abort. Defaults to false (abort).
const accept_unsupported_types = F &redef;
## TableFilter description type used for the `table` method.
type TableDescription: record {
## Common definitions for tables and events
@@ -82,11 +90,11 @@ export {
## Record describing the fields to be retrieved from the source input.
fields: any;
## If want_record is false, the event receives each value in fields as a separate argument.
## If it is set to true (default), the event receives all fields in a single record value.
want_record: bool &default=T;
## The event that is raised each time a new line is received from the reader.
## The event will receive an Input::Event enum as the first element, and the fields as the following arguments.
ev: any;
@@ -106,7 +114,8 @@ export {
## description: `EventDescription` record describing the source.
global add_event: function(description: Input::EventDescription) : bool;
## Remove an input stream. Returns true on success and false if the named stream was
## not found.
##
## id: string value identifying the stream to be removed
global remove: function(id: string) : bool;
@@ -117,8 +126,9 @@ export {
## id: string value identifying the stream
global force_update: function(id: string) : bool;
## Event that is called when the end of a data source has been reached, including
## after an update.
global end_of_data: event(name: string, source: string);
}
@load base/input.bif
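As a usage sketch of the input API documented above (the file name and record types are illustrative, not part of the commit), a table stream loads data into a Bro table, and Input::end_of_data fires once the source has been read completely:

    type Idx: record {
        ip: addr;
    };

    type Val: record {
        reason: string;
    };

    global blacklist: table[addr] of Val = table();

    event bro_init()
        {
        Input::add_table([$source="blacklist.file", $name="blacklist",
                          $idx=Idx, $val=Val, $destination=blacklist]);
        }

    event Input::end_of_data(name: string, source: string)
        {
        if ( name == "blacklist" )
            print fmt("done reading %s", source);
        }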

View file

@@ -2,4 +2,5 @@
@load ./postprocessors
@load ./writers/ascii
@load ./writers/dataseries
@load ./writers/elasticsearch
@load ./writers/none

View file

@@ -99,6 +99,12 @@ export {
## file name. Generally, filenames are expected to be given
## without any extensions; writers will add appropriate
## extensions automatically.
##
## If this path is found to conflict with another filter's
## for the same writer type, it is automatically corrected
## by appending "-N", where N is the smallest integer greater
## than or equal to 2 that allows the corrected path name to not
## conflict with another filter's.
path: string &optional;
## A function returning the output path for recording entries
@@ -118,7 +124,10 @@ export {
## rec: An instance of the stream's ``columns`` type with its
## fields set to the values to be logged.
##
## Returns: The path to be used for the filter, which will be subject
## to the same automatic correction rules as the *path*
## field of :bro:type:`Log::Filter` in the case of conflicts
## with other filters trying to use the same writer/path pair.
path_func: function(id: ID, path: string, rec: any): string &optional;
## Subset of column names to record. If not given, all
@@ -321,6 +330,11 @@ export {
## Log::default_rotation_postprocessor_cmd
## Log::default_rotation_postprocessors
global run_rotation_postprocessor_cmd: function(info: RotationInfo, npath: string) : bool;
## The streams which are currently active and not disabled.
## This table is not meant to be modified by users! Only use it for
## examining which streams are active.
global active_streams: table[ID] of Stream = table();
}
# We keep a script-level copy of all filters so that we can manipulate them.
@@ -335,22 +349,23 @@ function __default_rotation_postprocessor(info: RotationInfo) : bool
{
if ( info$writer in default_rotation_postprocessors )
return default_rotation_postprocessors[info$writer](info);
else
# Return T by default so that postprocessor-less writers don't shut down.
return T;
}
function default_path_func(id: ID, path: string, rec: any) : string
{
# The suggested path value is a previous result of this function
# or a filter path explicitly set by the user, so continue using it.
if ( path != "" )
return path;
local id_str = fmt("%s", id);
local parts = split1(id_str, /::/);
if ( |parts| == 2 )
{
# Example: Notice::LOG -> "notice"
if ( parts[2] == "LOG" )
{
@@ -405,11 +420,15 @@ function create_stream(id: ID, stream: Stream) : bool
if ( ! __create_stream(id, stream) )
return F;
active_streams[id] = stream;
return add_default_filter(id);
}
function disable_stream(id: ID) : bool
{
delete active_streams[id];
return __disable_stream(id);
}
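A sketch of the path_func and active_streams behavior described above; the filter name and split logic are made up, and any conflict with an existing writer/path pair would be resolved by the "-N" suffixing documented earlier:

    function split_log(id: Log::ID, path: string, rec: Conn::Info) : string
        {
        # Send locally-originated connections to one file, the rest to another.
        return Site::is_local_addr(rec$id$orig_h) ? "conn-local" : "conn-remote";
        }

    event bro_init()
        {
        Log::add_filter(Conn::LOG, [$name="conn-split", $path_func=split_log]);

        # active_streams is read-only by convention: inspect, don't modify.
        if ( Conn::LOG in Log::active_streams )
            print "conn stream is active";
        }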

View file

@@ -8,12 +8,13 @@ export {
## into files. This is primarily for debugging purposes.
const output_to_stdout = F &redef;
## If true, include lines with log meta information such as column names with
## types, the values of the ASCII logging options that are in use, and the time
## when the file was opened and closed (the latter at the end).
const include_meta = T &redef;
## Prefix for lines with meta information.
const meta_prefix = "#" &redef;
## Separator between fields.
const separator = "\t" &redef;
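A sketch of tuning the new meta options from site policy (values illustrative):

    # Drop the meta lines entirely, or restyle their prefix.
    redef LogAscii::include_meta = F;
    redef LogAscii::meta_prefix = "# ";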

View file

@@ -0,0 +1,48 @@
##! Log writer for sending logs to an ElasticSearch server.
##!
##! Note: This module is in testing and is not yet considered stable!
##!
##! There is one known memory issue. If your ElasticSearch server is
##! running slowly and taking too long to return from bulk insert
##! requests, the message queue to the writer thread will continue
##! growing larger and larger, giving the appearance of a memory leak.
module LogElasticSearch;
export {
## Name of the ES cluster
const cluster_name = "elasticsearch" &redef;
## ES Server
const server_host = "127.0.0.1" &redef;
## ES Port
const server_port = 9200 &redef;
## Name of the ES index
const index_prefix = "bro" &redef;
## The ES type prefix comes before the name of the related log.
## e.g. prefix = "bro\_" would create types of bro_dns, bro_software, etc.
const type_prefix = "" &redef;
## The time before an ElasticSearch transfer will time out. Note that
## the fractional part of the timeout will be ignored. In particular, time
## specifications less than a second result in a timeout value of 0, which
## means "no timeout."
const transfer_timeout = 2secs;
## The batch size is the number of messages that will be queued up before
## they are sent to be bulk indexed.
const max_batch_size = 1000 &redef;
## The maximum amount of wall-clock time that is allowed to pass without
## finishing a bulk log send. This represents the maximum delay you
## would like to have with your logs before they are sent to ElasticSearch.
const max_batch_interval = 1min &redef;
## The maximum byte size for a buffered JSON string to send to the bulk
## insert API.
const max_byte_size = 1024 * 1024 &redef;
}
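A hedged sketch of using this writer for one log stream; the filter name is illustrative, and Log::WRITER_ELASTICSEARCH is assumed to be the enum under which the writer registers itself:

    redef LogElasticSearch::server_host = "127.0.0.1";
    redef LogElasticSearch::max_batch_size = 10000;

    event bro_init()
        {
        # Send a copy of the HTTP log to ElasticSearch alongside the default writer.
        Log::add_filter(HTTP::LOG, [$name="http-to-es",
                                    $writer=Log::WRITER_ELASTICSEARCH]);
        }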

View file

@@ -23,7 +23,7 @@ redef Cluster::worker2manager_events += /Notice::cluster_notice/;
@if ( Cluster::local_node_type() != Cluster::MANAGER )
# The notice policy is completely handled by the manager and shouldn't be
# done by workers or proxies to save time for packet processing.
event bro_init() &priority=11
{
Notice::policy = table();
}

View file

@@ -1,5 +1,5 @@
##! This framework is intended to create an output and filtering path for
##! internal messages/warnings/errors. It should typically be loaded to
##! avoid Bro spewing internal messages to standard error and instead log
##! them to a file in a standard way. Note that this framework deals with
##! the handling of internally-generated reporter messages, for the
@@ -13,11 +13,11 @@ export {
redef enum Log::ID += { LOG };
## An indicator of reporter message severity.
type Level: enum {
## Informational, not needing specific attention.
INFO,
## Warning of a potential problem.
WARNING,
## A non-fatal error that should be addressed, but doesn't
## terminate program execution.
ERROR
@@ -36,24 +36,55 @@ export {
## Not all reporter messages will have locations in them though.
location: string &log &optional;
};
## Tunable for sending reporter warning messages to STDERR. The option to
## turn it off is presented here in case Bro is being run by some
## external harness and shouldn't output anything to the console.
const warnings_to_stderr = T &redef;
## Tunable for sending reporter error messages to STDERR. The option to
## turn it off is presented here in case Bro is being run by some
## external harness and shouldn't output anything to the console.
const errors_to_stderr = T &redef;
}
global stderr: file;
event bro_init() &priority=5
{
Log::create_stream(Reporter::LOG, [$columns=Info]);
if ( errors_to_stderr || warnings_to_stderr )
stderr = open("/dev/stderr");
}
event reporter_info(t: time, msg: string, location: string) &priority=-5
{
Log::write(Reporter::LOG, [$ts=t, $level=INFO, $message=msg, $location=location]);
}
event reporter_warning(t: time, msg: string, location: string) &priority=-5
{
if ( warnings_to_stderr )
{
if ( t > double_to_time(0.0) )
print stderr, fmt("WARNING: %.6f %s (%s)", t, msg, location);
else
print stderr, fmt("WARNING: %s (%s)", msg, location);
}
Log::write(Reporter::LOG, [$ts=t, $level=WARNING, $message=msg, $location=location]);
}
event reporter_error(t: time, msg: string, location: string) &priority=-5
{
if ( errors_to_stderr )
{
if ( t > double_to_time(0.0) )
print stderr, fmt("ERROR: %.6f %s (%s)", t, msg, location);
else
print stderr, fmt("ERROR: %s (%s)", msg, location);
}
Log::write(Reporter::LOG, [$ts=t, $level=ERROR, $message=msg, $location=location]);
}
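A sketch of using the new tunables when Bro runs under an external harness that owns the console (not part of the commit):

    # Keep reporter messages in reporter.log only; don't echo them to stderr.
    redef Reporter::warnings_to_stderr = F;
    redef Reporter::errors_to_stderr = F;

    event bro_init()
        {
        # Generates a reporter_warning event, which lands in reporter.log.
        Reporter::warning("example warning from script land");
        }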