Merge remote-tracking branch 'origin/master' into topic/bernhard/sqlite

Conflicts:
	scripts/base/frameworks/logging/__load__.bro
	src/CMakeLists.txt
	src/logging.bif
	src/types.bif
Bernhard Amann 2012-07-25 15:04:23 -07:00
commit da157c8ded
296 changed files with 4703 additions and 2175 deletions

scripts/base/frameworks/communication/main.bro

@@ -10,14 +10,14 @@ export {
## The communication logging stream identifier.
redef enum Log::ID += { LOG };
## Which interface to listen on. The addresses ``0.0.0.0`` and ``[::]``
## are wildcards.
const listen_interface = 0.0.0.0 &redef;
## Which port to listen on.
const listen_port = 47757/tcp &redef;
## This defines if a listening socket should use SSL.
const listen_ssl = F &redef;
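
In a deployment these defaults are typically overridden from a site's own policy script; a minimal sketch using only the options declared above, with illustrative values:

    redef Communication::listen_interface = [::];    # listen on any IPv6 address
    redef Communication::listen_port = 47757/tcp;
    redef Communication::listen_ssl = T;             # require SSL on the listening socket
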
@@ -34,7 +34,7 @@ export {
## :bro:id:`Communication::listen_port` if it's already in use.
const listen_retry = 30 secs &redef;
-## Default compression level. Compression level is 0-9, with 0 = no
+## Default compression level. Compression level is 0-9, with 0 = no
## compression.
global compression_level = 0 &redef;
@@ -42,7 +42,7 @@ export {
type Info: record {
## The network time at which a communication event occurred.
ts: time &log;
-## The peer name (if any) for which a communication event is concerned.
+## The peer name (if any) with which a communication event is concerned.
peer: string &log &optional;
## Where the communication event message originated from, that is,
## either from the scripting layer or inside the Bro process.
@@ -70,7 +70,7 @@ export {
## If the *host* field is a non-global IPv6 address, this field
## can specify a particular :rfc:`4007` ``zone_id``.
zone_id: string &optional;
## Port of the remote Bro communication endpoint if we are initiating
## the connection based on the :bro:id:`connect` field.
p: port &optional;
@@ -120,7 +120,7 @@ export {
## The remote peer.
peer: event_peer &optional;
## Indicates the status of the node.
connected: bool &default = F;
};
@@ -163,7 +163,7 @@ event bro_init() &priority=5
function do_script_log_common(level: count, src: count, msg: string)
{
-Log::write(Communication::LOG, [$ts = network_time(),
+Log::write(Communication::LOG, [$ts = network_time(),
$level = (level == REMOTE_LOG_INFO ? "info" : "error"),
$src_name = src_names[src],
$peer = get_event_peer()$descr,
@@ -199,9 +199,9 @@ function connect_peer(peer: string)
local class = node?$class ? node$class : "";
local zone_id = node?$zone_id ? node$zone_id : "";
local id = connect(node$host, zone_id, p, class, node$retry, node$ssl);
if ( id == PEER_ID_NONE )
-Log::write(Communication::LOG, [$ts = network_time(),
+Log::write(Communication::LOG, [$ts = network_time(),
$peer = get_event_peer()$descr,
$message = "can't trigger connect"]);
pending_peers[id] = node;
@@ -340,7 +340,7 @@ event bro_init() &priority = -10 # let others modify nodes
{
if ( |nodes| > 0 )
enable_communication();
for ( tag in nodes )
{
if ( ! nodes[tag]$connect )
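
The loop above connects to every registered node whose *connect* flag is set. A minimal sketch of registering such a peer, with an illustrative name and address (the fields are the ones connect_peer reads above):

    redef Communication::nodes += {
        ["peer-1"] = [$host=192.168.1.10, $p=47757/tcp,
                      $connect=T, $retry=30 secs, $ssl=F]
    };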

scripts/base/frameworks/input/main.bro

@@ -55,7 +55,8 @@ export {
pred: function(typ: Input::Event, left: any, right: any): bool &optional;
## A key/value table that will be passed on the reader.
-## Interpretation of the values is left to the reader.
+## Interpretation of the values is left to the writer, but
+## usually they will be used for configuration purposes.
config: table[string] of string &default=table();
};
@@ -90,8 +91,9 @@ export {
ev: any;
## A key/value table that will be passed on the reader.
-## Interpretation of the values is left to the reader.
-config: table[string] of string &default=table();
+## Interpretation of the values is left to the writer, but
+## usually they will be used for configuration purposes.
+config: table[string] of string &default=table();
};
## Create a new table input from a given source. Returns true on success.
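
As an illustration of the config field, a hedged sketch of a table input that forwards one reader setting; the source file, record types, and option key here are hypothetical:

    type Idx: record { ip: addr; };
    type Val: record { reason: string; };

    global blacklist: table[addr] of Val = table();

    event bro_init()
        {
        Input::add_table([$source="blacklist.file", $name="blacklist",
                          $idx=Idx, $val=Val, $destination=blacklist,
                          # hypothetical reader option, passed through verbatim
                          $config=table(["some_reader_option"] = "value")]);
        }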

scripts/base/frameworks/logging/__load__.bro

@@ -3,4 +3,5 @@
@load ./writers/ascii
@load ./writers/dataseries
@load ./writers/sqlite
+@load ./writers/elasticsearch
@load ./writers/none

scripts/base/frameworks/logging/main.bro

@@ -139,9 +139,10 @@ export {
## default comes out of :bro:id:`Log::default_rotation_postprocessors`.
postprocessor: function(info: RotationInfo) : bool &optional;
-## A key/value table that will be passed on the writer.
-## Interpretation of the values is left to the writer.
-config: table[string] of string &default=table();
+## A key/value table that will be passed on to the writer.
+## Interpretation of the values is left to the writer, but
+## usually they will be used for configuration purposes.
+config: table[string] of string &default=table();
};
## Sentinel value for indicating that a filter was not found when looked up.
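
Given the branch being merged, the SQLite writer is a natural consumer of these settings; a sketch of passing one option through a filter (treat the "tablename" key as an assumption about that writer, not a documented interface):

    event bro_init()
        {
        Log::add_filter(Conn::LOG, [$name="sqlite-conn",
                                    $path="/var/db/conn",
                                    $writer=Log::WRITER_SQLITE,
                                    # assumed writer-specific option
                                    $config=table(["tablename"] = "conn")]);
        }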

scripts/base/frameworks/logging/writers/ascii.bro

@@ -8,12 +8,13 @@ export {
## into files. This is primarily for debugging purposes.
const output_to_stdout = F &redef;
-## If true, include a header line with column names and description
-## of the other ASCII logging options that were used.
-const include_header = T &redef;
+## If true, include lines with log meta information such as column names with
+## types, the values of ASCII logging options that are in use, and the time when
+## the file was opened and closed (the latter at the end).
+const include_meta = T &redef;
-## Prefix for the header line if included.
-const header_prefix = "#" &redef;
+## Prefix for lines with meta information.
+const meta_prefix = "#" &redef;
## Separator between fields.
const separator = "\t" &redef;
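
All of these are redef-able; for example, a minimal sketch that drops the meta lines and switches the field separator:

    redef LogAscii::include_meta = F;
    redef LogAscii::separator = "|";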

scripts/base/frameworks/logging/writers/elasticsearch.bro

@@ -0,0 +1,46 @@
+##! Log writer for sending logs to an ElasticSearch server.
+##!
+##! Note: This module is in testing and is not yet considered stable!
+##!
+##! There is one known memory issue. If your elasticsearch server is
+##! running slowly and taking too long to return from bulk insert
+##! requests, the message queue to the writer thread will continue
+##! growing larger and larger giving the appearance of a memory leak.
+module LogElasticSearch;
+export {
+## Name of the ES cluster
+const cluster_name = "elasticsearch" &redef;
+## ES Server
+const server_host = "127.0.0.1" &redef;
+## ES Port
+const server_port = 9200 &redef;
+## Name of the ES index
+const index_prefix = "bro" &redef;
+## The ES type prefix comes before the name of the related log.
+## e.g. prefix = "bro_" would create types of bro_dns, bro_software, etc.
+const type_prefix = "" &redef;
+## The time before an ElasticSearch transfer will timeout.
+## This is not working!
+const transfer_timeout = 2secs;
+## The batch size is the number of messages that will be queued up before
+## they are sent to be bulk indexed.
+const max_batch_size = 1000 &redef;
+## The maximum amount of wall-clock time that is allowed to pass without
+## finishing a bulk log send. This represents the maximum delay you
+## would like to have with your logs before they are sent to ElasticSearch.
+const max_batch_interval = 1min &redef;
+## The maximum byte size for a buffered JSON string to send to the bulk
+## insert API.
+const max_byte_size = 1024 * 1024 &redef;
+}
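
A sketch of pointing the writer at a non-default server and routing one stream to it; the host value is illustrative, and the Log::WRITER_ELASTICSEARCH name assumes the framework's usual WRITER_* naming convention:

    redef LogElasticSearch::server_host = "10.0.0.5";
    redef LogElasticSearch::max_batch_size = 10000;

    event bro_init()
        {
        Log::add_filter(HTTP::LOG, [$name="es-http",
                                    $writer=Log::WRITER_ELASTICSEARCH]);
        }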

scripts/base/frameworks/logging/writers/none.bro

@@ -3,8 +3,8 @@
module LogNone;
export {
-## If true, output some debugging output that can be useful for unit
-##testing the logging framework.
+## If true, output debugging output that can be useful for unit
+## testing the logging framework.
const debug = F &redef;
}