Mirror of https://github.com/zeek/zeek.git, synced 2025-10-04 07:38:19 +00:00

Merge branch 'master' into topic/jsiwek/autodoc-fixes

Conflicts:
	scripts/CMakeLists.txt
	scripts/base/frameworks/cluster/setup-connections.bro
	scripts/base/frameworks/communication/__load__.bro
	scripts/base/frameworks/metrics/conn-example.bro
	scripts/base/frameworks/metrics/http-example.bro
	scripts/site/local.bro

Commit 2a9ea6b8ba: 96 changed files with 1809 additions and 722 deletions
@@ -9,10 +9,6 @@ redef peer_description = Cluster::node;
 # Add a cluster prefix.
 @prefixes += cluster
 
 # Make this a controllable node since all cluster nodes are inherently
 # controllable.
 @load frameworks/control/controllee
-
-## If this script isn't found anywhere, the cluster bombs out.
-## Loading the cluster framework requires that a script by this name exists
-## somewhere in the BROPATH. The only thing in the file should be the
@@ -10,11 +10,14 @@
 @prefixes += cluster-manager
 
+## Load the script for local site configuration for the manager node.
+@load site/local-manager
+
 ## Turn off remote logging since this is the manager and should only log here.
 redef Log::enable_remote_logging = F;
 
 ## Use the cluster's archive logging script.
-redef Log::default_rotation_postprocessor = "archive-log";
+redef Log::default_rotation_postprocessor_cmd = "archive-log";
 
 ## We're processing essentially *only* remote events.
 redef max_remote_events_processed = 10000;
@@ -1,6 +1,9 @@
 @prefixes += cluster-proxy
 
+## Load the script for local site configuration for proxy nodes.
+@load site/local-proxy
+
 ## The proxy only syncs state; does not forward events.
 redef forward_remote_events = F;
 redef forward_remote_state_changes = T;
@@ -12,5 +15,5 @@ redef Log::enable_local_logging = F;
 redef Log::enable_remote_logging = T;
 
 ## Use the cluster's delete-log script.
-redef Log::default_rotation_postprocessor = "delete-log";
+redef Log::default_rotation_postprocessor_cmd = "delete-log";
 
@@ -1,6 +1,9 @@
 @prefixes += cluster-worker
 
+## Load the script for local site configuration for the worker nodes.
+@load site/local-worker
+
 ## Don't do any local logging.
 redef Log::enable_local_logging = F;
 
@@ -8,7 +11,7 @@ redef Log::enable_local_logging = F;
 redef Log::enable_remote_logging = T;
 
 ## Use the cluster's delete-log script.
-redef Log::default_rotation_postprocessor = "delete-log";
+redef Log::default_rotation_postprocessor_cmd = "delete-log";
 
 ## Record all packets into trace file.
 # TODO: should we really be setting this to T?
@@ -1,4 +1,5 @@
-@load base/frameworks/communication
 @load ./main
+@load base/frameworks/communication/main
 
 module Cluster;
@@ -1,5 +1 @@
-# TODO: get rid of this as soon as the Expr.cc hack is changed.
-@if ( getenv("ENABLE_COMMUNICATION") != "" )
 @load ./main
-@endif
-
@@ -108,6 +108,9 @@ const src_names = {
 event bro_init()
 	{
 	Log::create_stream(COMMUNICATION, [$columns=Info]);
+
+	if ( |nodes| > 0 )
+		enable_communication();
 	}
 
 function do_script_log_common(level: count, src: count, msg: string)
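
For context on the new guard above: communication now starts only when the Communication::nodes table is non-empty. A minimal sketch of registering a peer so that |nodes| > 0 holds at bro_init() (the peer name, host, port, and event pattern are illustrative, not from this commit):

redef Communication::nodes += {
	# Hypothetical peer; with one entry present, the bro_init() handler
	# above calls enable_communication().
	["peer-1"] = [$host=127.0.0.1, $p=47757/tcp, $events=/.*/, $connect=T]
};
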
@@ -27,6 +27,17 @@ export {
 		ev: any &optional;
 	};
 
+	## Default function for building the path values for log filters if not
+	## specified otherwise by a filter. The default implementation uses ``id``
+	## to derive a name.
+	##
+	## id: The log stream.
+	## path: A suggested path value, which may be either the filter's ``path``
+	## if defined or a fall-back generated internally.
+	##
+	## Returns: The path to be used for the filter.
+	global default_path_func: function(id: ID, path: string) : string &redef;
+
 	## Filter customizing logging.
 	type Filter: record {
 		## Descriptive name to reference this filter.
@@ -50,7 +61,7 @@
 		## The specific interpretation of the string is up to
 		## the used writer, and may for example be the destination
 		## file name. Generally, filenames are expected to be given
 		## without any extensions; writers will add appropriate
 		## extensions automatically.
 		path: string &optional;
 
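
Since default_path_func is &redef, a site can swap in its own path builder matching the documented signature. A minimal sketch, assuming a hypothetical naming scheme:

function my_path_func(id: Log::ID, path: string) : string
	{
	# Prefix every log path with a site tag, keeping the suggested
	# path value as the base name.
	return fmt("site-%s", path);
	}

redef Log::default_path_func = my_path_func;
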
@@ -81,36 +92,34 @@ export {
 
 	## Information passed into rotation callback functions.
 	type RotationInfo: record {
-		writer: Writer;    ##< Writer.
-		path: string;      ##< Original path value.
-		open: time;        ##< Time when opened.
-		close: time;       ##< Time when closed.
+		writer: Writer;    ##< Writer.
+		fname: string;     ##< Full name of the rotated file.
+		path: string;      ##< Original path value.
+		open: time;        ##< Time when opened.
+		close: time;       ##< Time when closed.
 		terminating: bool; ##< True if rotation occurred due to Bro shutting down.
 	};
 
 	## Default rotation interval. Zero disables rotation.
 	const default_rotation_interval = 0secs &redef;
 
-	## Default naming suffix format. Uses a strftime() style.
-	const default_rotation_date_format = "%y-%m-%d_%H.%M.%S" &redef;
+	## Default naming format for timestamps embedded into filenames. Uses a strftime() style.
+	const default_rotation_date_format = "%Y-%m-%d-%H-%M-%S" &redef;
 
-	## Default postprocessor for writers outputting into files.
-	const default_rotation_postprocessor = "" &redef;
+	## Default shell command to run on rotated files. Empty for none.
+	const default_rotation_postprocessor_cmd = "" &redef;
 
-	## Default function to construct the name of a rotated output file.
-	## The default implementation appends info$date_fmt to the original
-	## file name.
-	##
-	## info: Meta-data about the file to be rotated.
-	global default_rotation_path_func: function(info: RotationInfo) : string &redef;
+	## Specifies the default postprocessor function per writer type. Entries in this
+	## table are initialized by each writer type.
+	const default_rotation_postprocessors: table[Writer] of function(info: RotationInfo) : bool &redef;
 
 	## Type for controlling file rotation.
 	type RotationControl: record {
 		## Rotation interval.
 		interv: interval &default=default_rotation_interval;
 		## Format for timestamps embedded into rotated file names.
 		date_fmt: string &default=default_rotation_date_format;
-		## Postprocessor process to run on rotated file.
-		postprocessor: string &default=default_rotation_postprocessor;
+		## Callback function to trigger for rotated files. If not set, the default
+		## comes out of default_rotation_postprocessors.
+		postprocessor: function(info: RotationInfo) : bool &optional;
 	};
 
 	## Specifies rotation parameters per ``(id, path)`` tuple.
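
To illustrate the renamed knobs, a minimal site-tuning sketch; the interval is illustrative, and "archive-log" is the helper the cluster scripts above already reference:

# Rotate all logs hourly and hand rotated files to a shell helper.
redef Log::default_rotation_interval = 1hr;
redef Log::default_rotation_postprocessor_cmd = "archive-log";
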
@@ -133,6 +142,8 @@ export {
 	global flush: function(id: ID): bool;
 	global add_default_filter: function(id: ID) : bool;
 	global remove_default_filter: function(id: ID) : bool;
+
+	global run_rotation_postprocessor_cmd: function(info: RotationInfo, npath: string) : bool;
 }
 
 # We keep a script-level copy of all filters so that we can manipulate them.
@@ -140,10 +151,39 @@ global filters: table[ID, string] of Filter;
 
 @load logging.bif.bro # Needs Filter and Stream defined.
 
-function default_rotation_path_func(info: RotationInfo) : string
+module Log;
+
+# Used internally by the log manager.
+function __default_rotation_postprocessor(info: RotationInfo) : bool
 	{
-	local date_fmt = rotation_control[info$writer, info$path]$date_fmt;
-	return fmt("%s-%s", info$path, strftime(date_fmt, info$open));
+	if ( info$writer in default_rotation_postprocessors )
+		return default_rotation_postprocessors[info$writer](info);
 	}
 
+function default_path_func(id: ID, path: string) : string
+	{
+	# TODO for Seth: Do what you want. :)
+	return path;
+	}
+
+# Run post-processor on file. If there isn't any postprocessor defined,
+# we move the file to a nicer name.
+function run_rotation_postprocessor_cmd(info: RotationInfo, npath: string) : bool
+	{
+	local pp_cmd = default_rotation_postprocessor_cmd;
+
+	if ( pp_cmd == "" )
+		return T;
+
+	# The date format is hard-coded here to provide a standardized
+	# script interface.
+	system(fmt("%s %s %s %s %s %d",
+	           pp_cmd, npath, info$path,
+	           strftime("%y-%m-%d_%H.%M.%S", info$open),
+	           strftime("%y-%m-%d_%H.%M.%S", info$close),
+	           info$terminating));
+
+	return T;
+	}
+
 function create_stream(id: ID, stream: Stream) : bool
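
The hard-coded strftime() format above fixes the argument contract for shell postprocessors: the command receives the rotated file name, the original path, the open and close timestamps, and the terminating flag, in that order. A minimal sketch, assuming a hypothetical "compress-log" script on the PATH that accepts those five arguments:

# compress-log <rotated file> <original path> <open> <close> <terminating>
redef Log::default_rotation_postprocessor_cmd = "compress-log";
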
@@ -159,9 +199,15 @@ function disable_stream(id: ID) : bool
 	if ( ! __disable_stream(id) )
 		return F;
 	}
 
 function add_filter(id: ID, filter: Filter) : bool
 	{
+	# This is a work-around for the fact that we can't forward-declare
+	# the default_path_func and then use it as &default in the record
+	# definition.
+	if ( ! filter?$path_func )
+		filter$path_func = default_path_func;
+
 	filters[id, filter$name] = filter;
 	return __add_filter(id, filter);
 	}
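
Usage-wise, the work-around means a filter that sets no explicit $path_func still picks up the default. A minimal sketch of attaching a second filter to an existing stream (the stream ID and path are illustrative):

event bro_init()
	{
	# Hypothetical stream ID; writes a second copy of the stream
	# under a different path, using the default path function.
	Log::add_filter(HTTP::HTTP, [$name="http-copy", $path="http-copy"]);
	}
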
@@ -26,4 +26,19 @@ export {
 	const unset_field = "-" &redef;
 }
 
+# Default function to postprocess a rotated ASCII log file. It moves the rotated
+# file to a new name that includes a timestamp with the opening time, and then
+# runs the writer's default postprocessor command on it.
+function default_rotation_postprocessor_func(info: Log::RotationInfo) : bool
+	{
+	# Move file to name including both opening and closing time.
+	local dst = fmt("%s.%s.log", info$path,
+	                strftime(Log::default_rotation_date_format, info$open));
+
+	system(fmt("/bin/mv %s %s", info$fname, dst));
+
+	# Run default postprocessor.
+	return Log::run_rotation_postprocessor_cmd(info, dst);
+	}
+
+redef Log::default_rotation_postprocessors += { [Log::WRITER_ASCII] = default_rotation_postprocessor_func };
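
Taken together with the new default date format, a rotated "conn" log opened at 2011-07-01 10:00:00 is moved to conn.2011-07-01-10-00-00.log before the postprocessor command runs. The timestamp portion follows Log::default_rotation_date_format, which stays redef-able:

# Illustrative: embed a more compact timestamp in rotated file names.
redef Log::default_rotation_date_format = "%Y%m%d-%H%M%S";
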
@@ -1,19 +0,0 @@
-@load base/frameworks/metrics
-
-redef enum Metrics::ID += {
-	CONNS_ORIGINATED,
-	CONNS_RESPONDED
-};
-
-event bro_init()
-	{
-	Metrics::configure(CONNS_ORIGINATED, [$aggregation_mask=24, $break_interval=5mins]);
-	Metrics::configure(CONNS_RESPONDED, [$aggregation_mask=24, $break_interval=5mins]);
-	}
-
-event connection_established(c: connection)
-	{
-	Metrics::add_data(CONNS_ORIGINATED, [$host=c$id$orig_h], 1);
-	Metrics::add_data(CONNS_RESPONDED, [$host=c$id$resp_h], 1);
-	}
-
@@ -1,20 +0,0 @@
-@load base/frameworks/metrics
-
-redef enum Metrics::ID += {
-	HTTP_REQUESTS_BY_STATUS_CODE,
-	HTTP_REQUESTS_BY_HOST,
-};
-
-event bro_init()
-	{
-	Metrics::configure(HTTP_REQUESTS_BY_STATUS_CODE, [$aggregation_mask=24, $break_interval=10secs]);
-	Metrics::configure(HTTP_REQUESTS_BY_HOST, [$break_interval=10secs]);
-	}
-
-event HTTP::log_http(rec: HTTP::Info)
-	{
-	if ( rec?$host )
-		Metrics::add_data(HTTP_REQUESTS_BY_HOST, [$index=rec$host], 1);
-	if ( rec?$status_code )
-		Metrics::add_data(HTTP_REQUESTS_BY_STATUS_CODE, [$host=rec$id$orig_h, $index=fmt("%d", rec$status_code)], 1);
-	}
@@ -1,30 +1,28 @@
-##! This is the implementation of the metrics framework
+##! This is the implementation of the metrics framework.
 
 module Metrics;
 
 export {
 	redef enum Log::ID += { METRICS };
 
 	type ID: enum {
 		ALL,
 		NOTHING,
 	};
 
-	const default_aggregation_mask = 24 &redef;
-	const default_break_interval = 5mins &redef;
+	## The default interval used for "breaking" metrics and writing the
+	## current value to the logging stream.
+	const default_break_interval = 15mins &redef;
 
-	# TODO: configure a metrics filter logging stream to log the current
-	#       metrics configuration in case someone is looking through
-	#       old logs and the configuration has changed since then.
-	type Filter: record {
-		name: ID &optional;
-		## Global mask by which you'd like to aggregate traffic.
-		aggregation_mask: count &optional;
-		## This is essentially applying names to various subnets.
-		aggregation_table: table[subnet] of string &optional;
-		break_interval: interval &default=default_break_interval;
+	type Info: record {
+		ts: time &log;
+		metric_id: ID &log;
+		filter_name: string &log;
+		agg_subnet: string &log &optional;
+		index: string &log &optional;
+		value: count &log;
 	};
 
-	type Index: record {
+	type Entry: record {
 		## Host is the value to which this metric applies.
 		host: addr &optional;
 
@@ -36,36 +34,71 @@ export {
 		## metric since multiple IP addresses could respond for the same Host
 		## header value.
 		index: string &default="";
+
+		## The value by which the counter should be increased in each filter
+		## where this entry is accepted.
+		increment: count &default=1;
 	};
 
-	type Info: record {
-		ts: time &log;
-		name: ID &log;
-		index: string &log &optional;
-		agg_subnet: string &log &optional;
-		value: count &log;
+	# TODO: configure a metrics filter logging stream to log the current
+	#       metrics configuration in case someone is looking through
+	#       old logs and the configuration has changed since then.
+	type Filter: record {
+		## The :bro:type:`Metrics::ID` that this filter applies to.
+		id: ID &optional;
+		## The name for this filter so that multiple filters can be
+		## applied to a single metric to get a different view of the same
+		## metric data being collected (different aggregation, break, etc).
+		name: string &default="default";
+		## A predicate so that you can decide per index if you would like
+		## to accept the data being inserted.
+		pred: function(entry: Entry): bool &optional;
+		## Global mask by which you'd like to aggregate traffic.
+		aggregation_mask: count &optional;
+		## This is essentially applying names to various subnets.
+		aggregation_table: table[subnet] of string &optional;
+		## The interval at which the metric should be "broken" and written
+		## to the logging stream.
+		break_interval: interval &default=default_break_interval;
+		## This determines if the result of this filter is sent to the metrics
+		## logging stream. One use for the logging framework is as an internal
+		## thresholding and statistics gathering utility that is meant to
+		## never log but rather to generate notices and derive data.
+		log: bool &default=T;
+		## A straight threshold for generating a notice.
+		notice_threshold: count &optional;
+		## A series of thresholds at which to generate notices.
+		notice_thresholds: vector of count &optional;
+		## If this and a $notice_threshold value are set, this notice type
+		## will be generated by the metrics framework.
+		note: Notice::Type &optional;
 	};
 
-	global add_filter: function(name: ID, filter: Filter);
-	global add_data: function(name: ID, index: Index, increment: count);
+	global add_filter: function(id: ID, filter: Filter);
+	global add_data: function(id: ID, entry: Entry);
 
 	global log_metrics: event(rec: Info);
 }
 
-global metric_filters: table[ID] of Filter = table();
+global metric_filters: table[ID] of vector of Filter = table();
 
 type MetricIndex: table[string] of count &default=0;
 type MetricTable: table[string] of MetricIndex;
-global store: table[ID] of MetricTable = table();
+# This is indexed by metric ID and stream filter name.
+global store: table[ID, string] of MetricTable = table();
+
+# This stores the current threshold index for filters using the
+# $notice_thresholds element.
+global thresholds: table[string] of count = {} &default=0;
 
-event bro_init()
+event bro_init() &priority=5
 	{
 	Log::create_stream(METRICS, [$columns=Info, $ev=log_metrics]);
 	}
 
-function reset(name: ID)
+function reset(filter: Filter)
 	{
-	store[name] = table();
+	store[filter$id, filter$name] = table();
 	}
 
 event log_it(filter: Filter)
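
Pulling the new Filter fields together, a minimal sketch of a filter with a predicate and a notice threshold; the metric ID, notice type, and values are illustrative, in the style of the examples this commit removes:

redef enum Metrics::ID += { CONNS_ORIGINATED };
redef enum Notice::Type += { Conn_Count_Threshold };

event bro_init()
	{
	# Aggregate by /24, write every 5 minutes, and notice past 1000 hits.
	Metrics::add_filter(CONNS_ORIGINATED,
	    [$name="by-24",
	     $aggregation_mask=24,
	     $break_interval=5mins,
	     $pred(entry: Metrics::Entry) = { return entry?$host; },
	     $notice_threshold=1000,
	     $note=Conn_Count_Threshold]);
	}
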
@@ -73,70 +106,128 @@ event log_it(filter: Filter)
 	# If this node is the manager in a cluster, this needs to request values
 	# for this metric from all of the workers.
 
+	local id = filter$id;
 	local name = filter$name;
-	for ( agg_subnet in store[name] )
+	for ( agg_subnet in store[id, name] )
 		{
-		local metric_values = store[name][agg_subnet];
+		local metric_values = store[id, name][agg_subnet];
 		for ( index in metric_values )
 			{
 			local val = metric_values[index];
 			local m: Info = [$ts=network_time(),
-			                 $name=name,
-			                 $agg_subnet=fmt("%s", agg_subnet),
-			                 $index=index,
+			                 $metric_id=id,
+			                 $filter_name=name,
+			                 $agg_subnet=fmt("%s", agg_subnet),
+			                 $index=index,
 			                 $value=val];
 
+			if ( filter?$notice_threshold &&
+			     m$value >= filter$notice_threshold )
+				{
+				print m;
+				NOTICE([$note=filter$note,
+				        $msg=fmt("Metrics threshold crossed by %s %d/%d", m$agg_subnet, m$value, filter$notice_threshold),
+				        $n=m$value]);
+				}
+			else if ( filter?$notice_thresholds &&
+			          m$value >= filter$notice_thresholds[thresholds[cat(id,name)]] )
+				{
+				# TODO: implement this
+				}
+
 			# If there wasn't an index, remove the field.
 			if ( index == "" )
 				delete m$index;
 
 			# If there wasn't an aggregation subnet, remove the field.
 			if ( agg_subnet == "" )
 				delete m$agg_subnet;
 
 			Log::write(METRICS, m);
 			}
 		}
 
-	reset(name);
+	reset(filter);
 
 	schedule filter$break_interval { log_it(filter) };
 	}
 
-function add_filter(name: ID, filter: Filter)
+function add_filter(id: ID, filter: Filter)
 	{
 	if ( filter?$aggregation_table && filter?$aggregation_mask )
 		{
 		print "INVALID Metric filter: Defined $aggregation_table and $aggregation_mask.";
 		return;
 		}
+	if ( [id, filter$name] in store )
+		{
+		print fmt("INVALID Metric filter: Filter with name \"%s\" already exists.", filter$name);
+		return;
+		}
+	if ( filter?$notice_threshold && filter?$notice_thresholds )
+		{
+		print "INVALID Metric filter: Defined both $notice_threshold and $notice_thresholds";
+		return;
+		}
 
-	filter$name = name;
-	metric_filters[name] = filter;
-	store[name] = table();
+	if ( ! filter?$id )
+		filter$id = id;
+
+	if ( id !in metric_filters )
+		metric_filters[id] = vector();
+	metric_filters[id][|metric_filters[id]|] = filter;
+
+	store[id, filter$name] = table();
 
 	# Only do this on the manager if in a cluster.
 	schedule filter$break_interval { log_it(filter) };
 	}
 
-function add_data(name: ID, index: Index, increment: count)
+function add_data(id: ID, entry: Entry)
 	{
-	local conf = metric_filters[name];
+	if ( id !in metric_filters )
+		return;
 
-	local agg_subnet = "";
-	if ( index?$host )
+	local filters = metric_filters[id];
+
+	# Add the data to any of the defined filters.
+	for ( filter_id in filters )
 		{
-		if ( conf?$aggregation_mask )
+		local filter = filters[filter_id];
+
+		# If this filter has a predicate, run the predicate and skip this
+		# entry if the predicate returns false.
+		if ( filter?$pred && ! filter$pred(entry) )
+			next;
+
+		local agg_subnet = "";
+		local filt_store = store[id, filter$name];
+		if ( entry?$host )
 			{
-			local agg_mask = conf$aggregation_mask;
-			agg_subnet = fmt("%s", mask_addr(index$host, agg_mask));
+			if ( filter?$aggregation_mask )
+				{
+				local agg_mask = filter$aggregation_mask;
+				agg_subnet = fmt("%s", mask_addr(entry$host, agg_mask));
+				}
+			else if ( filter?$aggregation_table )
+				{
+				agg_subnet = fmt("%s", filter$aggregation_table[entry$host]);
+				# if an aggregation table is being used and the value isn't
+				# in the table, that means we aren't interested in it.
+				if ( agg_subnet == "" )
+					next;
+				}
+			else
+				agg_subnet = fmt("%s", entry$host);
 			}
-		else if ( conf?$aggregation_table )
-			agg_subnet = fmt("%s", conf$aggregation_table[index$host]);
-		else
-			agg_subnet = fmt("%s", index$host);
-		}
 
-	if ( agg_subnet !in store[name] )
-		store[name][agg_subnet] = table();
+		if ( agg_subnet !in filt_store )
+			filt_store[agg_subnet] = table();
 
-	if ( index$index !in store[name][agg_subnet] )
-		store[name][agg_subnet][index$index] = 0;
-	store[name][agg_subnet][index$index] = store[name][agg_subnet][index$index] + increment;
+		local fs = filt_store[agg_subnet];
+		if ( entry$index !in fs )
+			fs[entry$index] = 0;
+		fs[entry$index] = fs[entry$index] + entry$increment;
+		}
 	}
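
Feeding data now goes through an Entry record instead of separate index/increment arguments. A minimal sketch mirroring the removed conn-example, using the metric ID sketched above:

event connection_established(c: connection)
	{
	# Each accepted entry bumps the per-host counter by $increment
	# (default 1); the /24 aggregation comes from the filter.
	Metrics::add_data(CONNS_ORIGINATED, [$host=c$id$orig_h]);
	}
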
@@ -6,7 +6,8 @@
 @load ./actions/drop
 @load ./actions/email_admin
 @load ./actions/page
+@load ./actions/add-geodata
 
-# Load the script to add hostnames to emails by default.
-# NOTE: this exposes a memleak in async DNS lookups.
-#@load ./extend-email/hostnames
+# There shouldn't be any default overhead from loading these since they
+# *should* only do anything when notices have the ACTION_EMAIL action applied.
+@load ./extend-email/hostnames
scripts/base/frameworks/notice/actions/add-geodata.bro (new file, 47 lines)
@@ -0,0 +1,47 @@
+##! This script adds geographic location data to notices for the "remote"
+##! host in a connection. It does make the assumption that one of the
+##! addresses in a connection is "local" and one is "remote", which is
+##! probably a safe assumption to make in most cases. If both addresses
+##! are remote, it will use the $src address.
+
+module Notice;
+
+export {
+	redef enum Action += {
+		## Indicates that the notice should have geodata added for the
+		## "remote" host. :bro:id:`Site::local_nets` must be defined
+		## in order for this to work.
+		ACTION_ADD_GEODATA
+	};
+
+	redef record Info += {
+		## If libGeoIP support is built in, notices can have geographic
+		## information attached to them.
+		remote_location: geo_location &log &optional;
+	};
+
+	## Notice types which should have the "remote" location looked up.
+	## If GeoIP support is not built in, this does nothing.
+	const lookup_location_types: set[Notice::Type] = {} &redef;
+
+	## Add a helper to the notice policy for looking up GeoIP data.
+	redef Notice::policy += {
+		[$pred(n: Notice::Info) = { return (n$note in Notice::lookup_location_types); },
+		 $priority = 10],
+	};
+}
+
+# This is handled at a high priority in case other notice handlers
+# want to use the data.
+event notice(n: Notice::Info) &priority=10
+	{
+	if ( ACTION_ADD_GEODATA in n$actions &&
+	     |Site::local_nets| > 0 &&
+	     ! n?$remote_location )
+		{
+		if ( n?$src && ! Site::is_local_addr(n$src) )
+			n$remote_location = lookup_location(n$src);
+		else if ( n?$dst && ! Site::is_local_addr(n$dst) )
+			n$remote_location = lookup_location(n$dst);
+		}
+	}
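
To opt notice types into the lookup, a site redefs lookup_location_types. A minimal sketch with a notice type chosen purely for illustration:

# Hypothetical choice of notice type; any notice of this type gets
# $remote_location populated when GeoIP support is available.
redef Notice::lookup_location_types += { SSH::Login };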