Merge remote-tracking branch 'origin/master' into topic/dnthayer/alarms-mail

Daniel Thayer 2012-10-30 11:32:58 -05:00
commit 0f97f0b6e4
618 changed files with 11183 additions and 2057 deletions

View file

@@ -2,4 +2,5 @@
@load ./postprocessors
@load ./writers/ascii
@load ./writers/dataseries
@load ./writers/elasticsearch
@load ./writers/none

View file

@@ -99,6 +99,12 @@ export {
## file name. Generally, filenames are expected to be given
## without any extensions; writers will add appropriate
## extensions automatically.
##
## If this path is found to conflict with another filter's path
## for the same writer type, it is automatically corrected by
## appending "-N", where N is the smallest integer greater than
## or equal to 2 that makes the corrected path unique.
path: string &optional;
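As a hedged illustration of this correction rule (a sketch only, assuming a loaded Conn::LOG stream and the default ASCII writer; the filter names are hypothetical), two filters requesting the same path end up with distinct files:

# Sketch: both filters ask for the path "conn" with the same writer.
Log::add_filter(Conn::LOG, [$name="first", $path="conn"]);
Log::add_filter(Conn::LOG, [$name="second", $path="conn"]);
# The first filter keeps "conn" (conn.log); the second is corrected
# to "conn-2" (conn-2.log).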
## A function returning the output path for recording entries
@@ -118,7 +124,10 @@ export {
## rec: An instance of the stream's ``columns`` type with its
## fields set to the values to be logged.
##
## Returns: The path to be used for the filter.
## Returns: The path to be used for the filter, which will be subject
## to the same automatic correction rules as the *path*
## field of :bro:type:`Log::Filter` in the case of conflicts
## with other filters trying to use the same writer/path pair.
path_func: function(id: ID, path: string, rec: any): string &optional;
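For illustration, a minimal path_func sketch (assuming the Conn::LOG stream and the standard Site module; split_path is a hypothetical name) that routes a connection log by whether the originator is a local address:

function split_path(id: Log::ID, path: string, rec: Conn::Info): string
	{
	# Local originators go to conn-local.log, all others to conn-remote.log.
	return Site::is_local_addr(rec$id$orig_h) ? "conn-local" : "conn-remote";
	}

Log::add_filter(Conn::LOG, [$name="split", $path_func=split_path]);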
## Subset of column names to record. If not given, all
@@ -321,6 +330,11 @@ export {
## Log::default_rotation_postprocessor_cmd
## Log::default_rotation_postprocessors
global run_rotation_postprocessor_cmd: function(info: RotationInfo, npath: string) : bool;
## The streams which are currently active and not disabled.
## This table is not meant to be modified by users! Only use it for
## examining which streams are active.
global active_streams: table[ID] of Stream = table();
}
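A usage sketch for active_streams (read-only, per the note above; bro_init is the standard startup event):

event bro_init() &priority=-5
	{
	# Examine, but never modify, the set of active streams.
	for ( id in Log::active_streams )
		print fmt("active log stream: %s", id);
	}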
# We keep a script-level copy of all filters so that we can manipulate them.
@@ -335,22 +349,23 @@ function __default_rotation_postprocessor(info: RotationInfo) : bool
{
if ( info$writer in default_rotation_postprocessors )
return default_rotation_postprocessors[info$writer](info);
return F;
else
# Return T by default so that postprocessor-less writers don't shut down.
return T;
}
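To override this default for a single writer, a hedged sketch (my_ascii_pp is a hypothetical name) that registers a custom postprocessor via the redef-able default_rotation_postprocessors table:

function my_ascii_pp(info: Log::RotationInfo): bool
	{
	# Report the rotation; returning T keeps the writer running.
	print fmt("rotated %s to %s", info$path, info$fname);
	return T;
	}

redef Log::default_rotation_postprocessors += { [Log::WRITER_ASCII] = my_ascii_pp };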
function default_path_func(id: ID, path: string, rec: any) : string
{
# The suggested path value is a previous result of this function
# or a filter path explicitly set by the user, so continue using it.
if ( path != "" )
return path;
local id_str = fmt("%s", id);
local parts = split1(id_str, /::/);
if ( |parts| == 2 )
{
# The suggested path value is a previous result of this function
# or a filter path explicitly set by the user, so continue using it.
if ( path != "" )
return path;
# Example: Notice::LOG -> "notice"
if ( parts[2] == "LOG" )
{
@@ -405,11 +420,15 @@ function create_stream(id: ID, stream: Stream) : bool
if ( ! __create_stream(id, stream) )
return F;
active_streams[id] = stream;
return add_default_filter(id);
}
function disable_stream(id: ID) : bool
{
delete active_streams[id];
return __disable_stream(id);
}
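As a usage sketch for these functions (assuming the Syslog analyzer's stream is loaded):

event bro_init()
	{
	# Disabling removes the stream from active_streams and stops its output.
	Log::disable_stream(Syslog::LOG);
	}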

View file

@@ -8,12 +8,13 @@ export {
## into files. This is primarily for debugging purposes.
const output_to_stdout = F &redef;
## If true, include a header line with column names and description
## of the other ASCII logging options that were used.
const include_header = T &redef;
## If true, include lines with log meta information such as column
## names with types, the values of the ASCII logging options in use,
## and the times when the file was opened and closed (the latter at
## the end).
const include_meta = T &redef;
## Prefix for the header line if included.
const header_prefix = "#" &redef;
## Prefix for lines with meta information.
const meta_prefix = "#" &redef;
## Separator between fields.
const separator = "\t" &redef;
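A configuration sketch for these options (the values below are only examples):

# Sketch: drop the meta lines and switch to comma-separated fields.
redef LogAscii::include_meta = F;
redef LogAscii::separator = ",";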

View file

@@ -0,0 +1,48 @@
##! Log writer for sending logs to an ElasticSearch server.
##!
##! Note: This module is in testing and is not yet considered stable!
##!
##! There is one known memory issue. If your ElasticSearch server is
##! running slowly and taking too long to return from bulk insert
##! requests, the message queue to the writer thread will keep
##! growing, giving the appearance of a memory leak.
module LogElasticSearch;
export {
## Name of the ES cluster.
const cluster_name = "elasticsearch" &redef;
## Hostname or IP address of the ES server.
const server_host = "127.0.0.1" &redef;
## Port on which the ES server listens.
const server_port = 9200 &redef;
## Prefix for the name of the ES index.
const index_prefix = "bro" &redef;
## The ES type prefix comes before the name of the related log.
## e.g., a prefix of "bro_" would create types bro_dns, bro_software, etc.
const type_prefix = "" &redef;
## The time before an ElasticSearch transfer times out. Note that
## the fractional part of the timeout will be ignored. In particular, time
## specifications less than a second result in a timeout value of 0, which
## means "no timeout."
const transfer_timeout = 2secs;
## The batch size is the number of messages that will be queued up before
## they are sent to be bulk indexed.
const max_batch_size = 1000 &redef;
## The maximum amount of wall-clock time that is allowed to pass without
## finishing a bulk log send. This represents the maximum delay you
## would like to have with your logs before they are sent to ElasticSearch.
const max_batch_interval = 1min &redef;
## The maximum byte size for a buffered JSON string to send to the bulk
## insert API.
const max_byte_size = 1024 * 1024 &redef;
}
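Finally, a configuration sketch for this writer (the host and batch size below are purely illustrative):

# Sketch: point the writer at a remote ES node and batch more aggressively.
redef LogElasticSearch::server_host = "es.example.com";
redef LogElasticSearch::max_batch_size = 10000;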