Metrics framework checkpoint.
- Metrics API is much more similar to the Logging framework's API now.
- Filters define all output and metrics collection now.
- Initial attempt at thresholding and generating notices.
parent f36310dc0e
commit 9fa3bcffdd
7 changed files with 229 additions and 108 deletions
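For orientation, here is a minimal sketch of the new filter-based API as exercised by the examples added in this commit; the metric name, mask, and interval below are illustrative, while Metrics::add_filter, Metrics::add_data, and the Entry fields come from the diff itself:

@load frameworks/metrics

redef enum Metrics::ID += { CONNS_ORIGINATED };

event bro_init()
    {
    # Filters now carry the aggregation and break settings that
    # Metrics::configure() used to take.
    Metrics::add_filter(CONNS_ORIGINATED, [$aggregation_mask=24, $break_interval=1mins]);
    }

event connection_established(c: connection)
    {
    # add_data() now takes an Entry record; $increment defaults to 1.
    Metrics::add_data(CONNS_ORIGINATED, [$host=c$id$orig_h]);
    }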
@@ -1,19 +0,0 @@
@load frameworks/metrics

redef enum Metrics::ID += {
    CONNS_ORIGINATED,
    CONNS_RESPONDED
};

event bro_init()
    {
    Metrics::configure(CONNS_ORIGINATED, [$aggregation_mask=24, $break_interval=5mins]);
    Metrics::configure(CONNS_RESPONDED, [$aggregation_mask=24, $break_interval=5mins]);
    }

event connection_established(c: connection)
    {
    Metrics::add_data(CONNS_ORIGINATED, [$host=c$id$orig_h], 1);
    Metrics::add_data(CONNS_RESPONDED, [$host=c$id$resp_h], 1);
    }
@ -1,20 +0,0 @@
|
|||
@load frameworks/metrics
|
||||
|
||||
redef enum Metrics::ID += {
|
||||
HTTP_REQUESTS_BY_STATUS_CODE,
|
||||
HTTP_REQUESTS_BY_HOST,
|
||||
};
|
||||
|
||||
event bro_init()
|
||||
{
|
||||
Metrics::configure(HTTP_REQUESTS_BY_STATUS_CODE, [$aggregation_mask=24, $break_interval=10secs]);
|
||||
Metrics::configure(HTTP_REQUESTS_BY_HOST, [$break_interval=10secs]);
|
||||
}
|
||||
|
||||
event HTTP::log_http(rec: HTTP::Info)
|
||||
{
|
||||
if ( rec?$host )
|
||||
Metrics::add_data(HTTP_REQUESTS_BY_HOST, [$index=rec$host], 1);
|
||||
if ( rec?$status_code )
|
||||
Metrics::add_data(HTTP_REQUESTS_BY_STATUS_CODE, [$host=rec$id$orig_h, $index=fmt("%d", rec$status_code)], 1);
|
||||
}
|
|
@ -1,4 +1,4 @@
|
|||
##! This is the implementation of the metrics framework
|
||||
##! This is the implementation of the metrics framework.
|
||||
|
||||
module Metrics;
|
||||
|
||||
|
@ -6,25 +6,23 @@ export {
|
|||
redef enum Log::ID += { METRICS };
|
||||
|
||||
type ID: enum {
|
||||
ALL,
|
||||
NOTHING,
|
||||
};
|
||||
|
||||
const default_aggregation_mask = 24 &redef;
|
||||
const default_break_interval = 5mins &redef;
|
||||
## The default interval used for "breaking" metrics and writing the
|
||||
## current value to the logging stream.
|
||||
const default_break_interval = 15mins &redef;
|
||||
|
||||
# TODO: configure a metrics filter logging stream to log the current
|
||||
# metrics configuration in case someone is looking through
|
||||
# old logs and the configuration has changed since then.
|
||||
type Filter: record {
|
||||
name: ID &optional;
|
||||
## Global mask by which you'd like to aggregate traffic.
|
||||
aggregation_mask: count &optional;
|
||||
## This is essentially applying names to various subnets.
|
||||
aggregation_table: table[subnet] of string &optional;
|
||||
break_interval: interval &default=default_break_interval;
|
||||
type Info: record {
|
||||
ts: time &log;
|
||||
metric_id: ID &log;
|
||||
filter_name: string &log;
|
||||
agg_subnet: string &log &optional;
|
||||
index: string &log &optional;
|
||||
value: count &log;
|
||||
};
|
||||
|
||||
type Index: record {
|
||||
type Entry: record {
|
||||
## Host is the value to which this metric applies.
|
||||
host: addr &optional;
|
||||
|
||||
|
@ -36,36 +34,71 @@ export {
|
|||
## metric since multiple IP addresses could respond for the same Host
|
||||
## header value.
|
||||
index: string &default="";
|
||||
|
||||
## The value by which the counter should be increased in each filter
|
||||
## where this entry is accepted.
|
||||
increment: count &default=1;
|
||||
};
|
||||
|
||||
type Info: record {
|
||||
ts: time &log;
|
||||
name: ID &log;
|
||||
index: string &log &optional;
|
||||
agg_subnet: string &log &optional;
|
||||
value: count &log;
|
||||
# TODO: configure a metrics filter logging stream to log the current
|
||||
# metrics configuration in case someone is looking through
|
||||
# old logs and the configuration has changed since then.
|
||||
type Filter: record {
|
||||
## The :bro:type:`Metrics::ID` that this filter applies to.
|
||||
id: ID &optional;
|
||||
## The name for this filter so that multiple filters can be
|
||||
## applied to a single metrics to get a different view of the same
|
||||
## metric data being collected (different aggregation, break, etc).
|
||||
name: string &default="default";
|
||||
## A predicate so that you can decide per index if you would like
|
||||
## to accept the data being inserted.
|
||||
pred: function(entry: Entry): bool &optional;
|
||||
## Global mask by which you'd like to aggregate traffic.
|
||||
aggregation_mask: count &optional;
|
||||
## This is essentially applying names to various subnets.
|
||||
aggregation_table: table[subnet] of string &optional;
|
||||
## The interval at which the metric should be "broken" and written
|
||||
## to the logging stream.
|
||||
break_interval: interval &default=default_break_interval;
|
||||
## This determines if the result of this filter is sent to the metrics
|
||||
## logging stream. One use for the logging framework is as an internal
|
||||
## thresholding and statistics gathering utility that is meant to
|
||||
## never log but rather to generate notices and derive data.
|
||||
log: bool &default=T;
|
||||
## A straight threshold for generating a notice.
|
||||
notice_threshold: count &optional;
|
||||
## A series of thresholds at which to generate notices.
|
||||
notice_thresholds: vector of count &optional;
|
||||
## If this and a $notice_threshold value are set, this notice type
|
||||
## will be generated by the metrics framework.
|
||||
note: Notice::Type &optional;
|
||||
};
|
||||
|
||||
global add_filter: function(name: ID, filter: Filter);
|
||||
global add_data: function(name: ID, index: Index, increment: count);
|
||||
global add_filter: function(id: ID, filter: Filter);
|
||||
global add_data: function(id: ID, entry: Entry);
|
||||
|
||||
global log_metrics: event(rec: Info);
|
||||
}
|
||||
|
||||
global metric_filters: table[ID] of Filter = table();
|
||||
global metric_filters: table[ID] of vector of Filter = table();
|
||||
|
||||
type MetricIndex: table[string] of count &default=0;
|
||||
type MetricTable: table[string] of MetricIndex;
|
||||
global store: table[ID] of MetricTable = table();
|
||||
# This is indexed by metric ID and stream filter name.
|
||||
global store: table[ID, string] of MetricTable = table();
|
||||
|
||||
event bro_init()
|
||||
# This stores the current threshold index for filters using the
|
||||
# $notice_thresholds element.
|
||||
global thresholds: table[string] of count = {} &default=0;
|
||||
|
||||
event bro_init() &priority=5
|
||||
{
|
||||
Log::create_stream(METRICS, [$columns=Info, $ev=log_metrics]);
|
||||
}
|
||||
|
||||
function reset(name: ID)
|
||||
function reset(filter: Filter)
|
||||
{
|
||||
store[name] = table();
|
||||
store[filter$id, filter$name] = table();
|
||||
}
|
||||
|
||||
event log_it(filter: Filter)
|
||||
|
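Taken together, the $log, $notice_threshold, and $note fields above let a filter act purely as a thresholding utility that never writes to metrics.log. A small illustrative sketch; the SCAN_TARGETS metric and Scan_Threshold_Crossed notice are hypothetical and not part of this commit:

redef enum Metrics::ID += { SCAN_TARGETS };              # hypothetical metric
redef enum Notice::Type += { Scan_Threshold_Crossed };   # hypothetical notice

event bro_init()
    {
    # Count per-/24 aggregates but never log; only raise a notice at 100.
    Metrics::add_filter(SCAN_TARGETS,
                        [$log=F,
                         $aggregation_mask=24,
                         $break_interval=5mins,
                         $notice_threshold=100,
                         $note=Scan_Threshold_Crossed]);
    }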
@ -73,70 +106,128 @@ event log_it(filter: Filter)
|
|||
# If this node is the manager in a cluster, this needs to request values
|
||||
# for this metric from all of the workers.
|
||||
|
||||
local id = filter$id;
|
||||
local name = filter$name;
|
||||
for ( agg_subnet in store[name] )
|
||||
for ( agg_subnet in store[id, name] )
|
||||
{
|
||||
local metric_values = store[name][agg_subnet];
|
||||
local metric_values = store[id, name][agg_subnet];
|
||||
for ( index in metric_values )
|
||||
{
|
||||
local val = metric_values[index];
|
||||
local m: Info = [$ts=network_time(),
|
||||
$name=name,
|
||||
$metric_id=id,
|
||||
$filter_name=name,
|
||||
$agg_subnet=fmt("%s", agg_subnet),
|
||||
$index=index,
|
||||
$value=val];
|
||||
|
||||
if ( filter?$notice_threshold &&
|
||||
m$value >= filter$notice_threshold )
|
||||
{
|
||||
print m;
|
||||
NOTICE([$note=filter$note,
|
||||
$msg=fmt("Metrics threshold crossed by %s %d/%d", m$agg_subnet, m$value, filter$notice_threshold),
|
||||
$n=m$value]);
|
||||
}
|
||||
|
||||
else if ( filter?$notice_thresholds &&
|
||||
m$value >= filter$notice_thresholds[thresholds[cat(id,name)]] )
|
||||
{
|
||||
# TODO: implement this
|
||||
}
|
||||
|
||||
# If there wasn't an index, remove the field.
|
||||
if ( index == "" )
|
||||
delete m$index;
|
||||
|
||||
# If there wasn't an aggregation subnet, remove the field.
|
||||
if ( agg_subnet == "" )
|
||||
delete m$agg_subnet;
|
||||
|
||||
Log::write(METRICS, m);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
reset(name);
|
||||
reset(filter);
|
||||
|
||||
schedule filter$break_interval { log_it(filter) };
|
||||
}
|
||||
|
||||
function add_filter(name: ID, filter: Filter)
|
||||
function add_filter(id: ID, filter: Filter)
|
||||
{
|
||||
if ( filter?$aggregation_table && filter?$aggregation_mask )
|
||||
{
|
||||
print "INVALID Metric filter: Defined $aggregation_table and $aggregation_mask.";
|
||||
return;
|
||||
}
|
||||
if ( [id, filter$name] in store )
|
||||
{
|
||||
print fmt("INVALID Metric filter: Filter with name \"%s\" already exists.", filter$name);
|
||||
return;
|
||||
}
|
||||
if ( filter?$notice_threshold && filter?$notice_thresholds )
|
||||
{
|
||||
print "INVALID Metric filter: Defined both $notice_threshold and $notice_thresholds";
|
||||
return;
|
||||
}
|
||||
|
||||
filter$name = name;
|
||||
metric_filters[name] = filter;
|
||||
store[name] = table();
|
||||
if ( ! filter?$id )
|
||||
filter$id = id;
|
||||
|
||||
if ( id !in metric_filters )
|
||||
metric_filters[id] = vector();
|
||||
metric_filters[id][|metric_filters[id]|] = filter;
|
||||
|
||||
store[id, filter$name] = table();
|
||||
|
||||
# Only do this on the manager if in a cluster.
|
||||
schedule filter$break_interval { log_it(filter) };
|
||||
}
|
||||
|
||||
function add_data(name: ID, index: Index, increment: count)
|
||||
function add_data(id: ID, entry: Entry)
|
||||
{
|
||||
local conf = metric_filters[name];
|
||||
if ( id !in metric_filters )
|
||||
return;
|
||||
|
||||
local agg_subnet = "";
|
||||
if ( index?$host )
|
||||
local filters = metric_filters[id];
|
||||
|
||||
# Add the data to any of the defined filters.
|
||||
for ( filter_id in filters )
|
||||
{
|
||||
if ( conf?$aggregation_mask )
|
||||
local filter = filters[filter_id];
|
||||
|
||||
# If this filter has a predicate, run the predicate and skip this
|
||||
# entry if the predicate return false.
|
||||
if ( filter?$pred &&
|
||||
! filter$pred(entry) )
|
||||
next;
|
||||
|
||||
local agg_subnet = "";
|
||||
local filt_store = store[id, filter$name];
|
||||
if ( entry?$host )
|
||||
{
|
||||
local agg_mask = conf$aggregation_mask;
|
||||
agg_subnet = fmt("%s", mask_addr(index$host, agg_mask));
|
||||
if ( filter?$aggregation_mask )
|
||||
{
|
||||
local agg_mask = filter$aggregation_mask;
|
||||
agg_subnet = fmt("%s", mask_addr(entry$host, agg_mask));
|
||||
}
|
||||
else if ( filter?$aggregation_table )
|
||||
{
|
||||
agg_subnet = fmt("%s", filter$aggregation_table[entry$host]);
|
||||
# if an aggregation table is being used and the value isn't
|
||||
# in the table, that means we aren't interested in it.
|
||||
if ( agg_subnet == "" )
|
||||
next;
|
||||
}
|
||||
else
|
||||
agg_subnet = fmt("%s", entry$host);
|
||||
}
|
||||
else if ( conf?$aggregation_table )
|
||||
agg_subnet = fmt("%s", conf$aggregation_table[index$host]);
|
||||
else
|
||||
agg_subnet = fmt("%s", index$host);
|
||||
|
||||
if ( agg_subnet !in filt_store )
|
||||
filt_store[agg_subnet] = table();
|
||||
|
||||
local fs = filt_store[agg_subnet];
|
||||
if ( entry$index !in fs )
|
||||
fs[entry$index] = 0;
|
||||
fs[entry$index] = fs[entry$index] + entry$increment;
|
||||
}
|
||||
|
||||
if ( agg_subnet !in store[name] )
|
||||
store[name][agg_subnet] = table();
|
||||
|
||||
if ( index$index !in store[name][agg_subnet] )
|
||||
store[name][agg_subnet][index$index] = 0;
|
||||
store[name][agg_subnet][index$index] = store[name][agg_subnet][index$index] + increment;
|
||||
}
|
||||
|
|
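The $notice_thresholds branch in log_it() above is still marked TODO. Purely as a sketch of how the stepping could work against the module-level thresholds table introduced in this commit (the helper name and message text are hypothetical, not part of the commit):

# Hypothetical helper: raise a notice each time the next unmet threshold in
# $notice_thresholds is crossed, then advance this filter's index in the
# module-level "thresholds" table.
function check_notice_thresholds(filter: Filter, m: Info)
    {
    if ( ! filter?$notice_thresholds || ! filter?$note )
        return;

    local key = cat(filter$id, filter$name);
    if ( thresholds[key] >= |filter$notice_thresholds| )
        return;  # All thresholds for this filter were already crossed.

    if ( m$value >= filter$notice_thresholds[thresholds[key]] )
        {
        NOTICE([$note=filter$note,
                $msg=fmt("Metrics threshold %d crossed by %s (%d)",
                         filter$notice_thresholds[thresholds[key]],
                         m?$agg_subnet ? m$agg_subnet : "", m$value),
                $n=m$value]);
        thresholds[key] = thresholds[key] + 1;
        }
    }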
scripts/policy/frameworks/metrics/conn-example.bro (new file, 20 lines)
@ -0,0 +1,20 @@
|
|||
|
||||
redef enum Metrics::ID += {
|
||||
CONNS_ORIGINATED,
|
||||
CONNS_RESPONDED
|
||||
};
|
||||
|
||||
event bro_init()
|
||||
{
|
||||
Metrics::add_filter(CONNS_ORIGINATED, [$aggregation_mask=24, $break_interval=1mins]);
|
||||
|
||||
# Site::local_nets must be defined in order for this to actually do anything.
|
||||
Metrics::add_filter(CONNS_RESPONDED, [$aggregation_table=Site::local_nets_table, $break_interval=1mins]);
|
||||
}
|
||||
|
||||
event connection_established(c: connection)
|
||||
{
|
||||
Metrics::add_data(CONNS_ORIGINATED, [$host=c$id$orig_h]);
|
||||
Metrics::add_data(CONNS_RESPONDED, [$host=c$id$resp_h]);
|
||||
}
|
||||
|
scripts/policy/frameworks/metrics/http-example.bro (new file, 22 lines)
@ -0,0 +1,22 @@
|
|||
|
||||
|
||||
redef enum Metrics::ID += {
|
||||
HTTP_REQUESTS_BY_STATUS_CODE,
|
||||
HTTP_REQUESTS_BY_HOST_HEADER,
|
||||
};
|
||||
|
||||
event bro_init()
|
||||
{
|
||||
Metrics::add_filter(HTTP_REQUESTS_BY_HOST_HEADER, [$break_interval=5mins]);
|
||||
|
||||
# Site::local_nets must be defined in order for this to actually do anything.
|
||||
Metrics::add_filter(HTTP_REQUESTS_BY_STATUS_CODE, [$aggregation_table=Site::local_nets_table, $break_interval=5mins]);
|
||||
}
|
||||
|
||||
event HTTP::log_http(rec: HTTP::Info)
|
||||
{
|
||||
if ( rec?$host )
|
||||
Metrics::add_data(HTTP_REQUESTS_BY_HOST_HEADER, [$index=rec$host]);
|
||||
if ( rec?$status_code )
|
||||
Metrics::add_data(HTTP_REQUESTS_BY_STATUS_CODE, [$host=rec$id$orig_h, $index=fmt("%d", rec$status_code)]);
|
||||
}
|
scripts/policy/frameworks/metrics/ssl-example.bro (new file, 22 lines)
@ -0,0 +1,22 @@
|
|||
|
||||
|
||||
redef enum Metrics::ID += {
|
||||
SSL_SERVERNAME,
|
||||
};
|
||||
|
||||
event bro_init()
|
||||
{
|
||||
Metrics::add_filter(SSL_SERVERNAME,
|
||||
[$name="no-google-ssl-servers",
|
||||
$pred(entry: Metrics::Entry) = {
|
||||
return (/google\.com$/ !in entry$index);
|
||||
},
|
||||
$break_interval=10secs
|
||||
]);
|
||||
}
|
||||
|
||||
event SSL::log_ssl(rec: SSL::Info)
|
||||
{
|
||||
if ( rec?$server_name )
|
||||
Metrics::add_data(SSL_SERVERNAME, [$index=rec$server_name]);
|
||||
}
|
|
@ -9,17 +9,17 @@ export {
|
|||
};
|
||||
|
||||
redef enum Metrics::ID += {
|
||||
SQL_ATTACKER,
|
||||
SQL_ATTACK_AGAINST,
|
||||
SQL_ATTACKS,
|
||||
SQL_ATTACKS_AGAINST,
|
||||
};
|
||||
|
||||
redef enum Tags += {
|
||||
## Indicator of a URI based SQL injection attack.
|
||||
URI_SQLI,
|
||||
## Indicator of client body based SQL injection attack. This is
|
||||
## typically the body content of a POST request. Not implemented yet!
|
||||
## typically the body content of a POST request. Not implemented yet.
|
||||
POST_SQLI,
|
||||
## Indicator of a cookie based SQL injection attack. Not implemented yet!
|
||||
## Indicator of a cookie based SQL injection attack. Not implemented yet.
|
||||
COOKIE_SQLI,
|
||||
};
|
||||
|
||||
|
@ -30,13 +30,18 @@ export {
|
|||
| /[\?&][^[:blank:]\x00-\x37]+?=[\-0-9%]*([[:blank:]\x00-\x37]|\/\*.*?\*\/)*['"]([[:blank:]\x00-\x37]|\/\*.*?\*\/)*(-|=|\+|\|\|)([[:blank:]\x00-\x37]|\/\*.*?\*\/)*([0-9]|\(?[cC][oO][nN][vV][eE][rR][tT]|[cC][aA][sS][tT])/
|
||||
| /[\?&][^[:blank:]\x00-\x37\|]+?=([[:blank:]\x00-\x37]|\/\*.*?\*\/)*['"]([[:blank:]\x00-\x37]|\/\*.*?\*\/|;)*([xX]?[oO][rR]|[nN]?[aA][nN][dD]|[hH][aA][vV][iI][nN][gG]|[uU][nN][iI][oO][nN]|[eE][xX][eE][cC]|[sS][eE][lL][eE][cC][tT]|[dD][eE][lL][eE][tT][eE]|[dD][rR][oO][pP]|[dD][eE][cC][lL][aA][rR][eE]|[cC][rR][eE][aA][tT][eE]|[rR][eE][gG][eE][xX][pP]|[iI][nN][sS][eE][rR][tT])([[:blank:]\x00-\x37]|\/\*.*?\*\/|[\[(])+[a-zA-Z&]{2,}/
|
||||
| /[\?&][^[:blank:]\x00-\x37]+?=[^\.]*?([cC][hH][aA][rR]|[aA][sS][cC][iI][iI]|[sS][uU][bB][sS][tT][rR][iI][nN][gG]|[tT][rR][uU][nN][cC][aA][tT][eE]|[vV][eE][rR][sS][iI][oO][nN]|[lL][eE][nN][gG][tT][hH])\(/
|
||||
| /\/\*![[:digit:]]{5}.*?\*\//;
|
||||
| /\/\*![[:digit:]]{5}.*?\*\// &redef;
|
||||
}
|
||||
|
||||
event bro_init()
|
||||
{
|
||||
Metrics::add_filter(SQL_ATTACKER, [$break_interval=5mins, $note=SQL_Injection_Attack]);
|
||||
Metrics::add_filter(SQL_ATTACK_AGAINST, [$break_interval=5mins, $note=SQL_Injection_Attack]);
|
||||
Metrics::add_filter(SQL_ATTACKS, [$log=T,
|
||||
$break_interval=1mins,
|
||||
$note=SQL_Injection_Attacker]);
|
||||
Metrics::add_filter(SQL_ATTACKS_AGAINST, [$log=T,
|
||||
$break_interval=1mins,
|
||||
$note=SQL_Injection_Attack,
|
||||
$notice_thresholds=vector(10,100)]);
|
||||
}
|
||||
|
||||
event http_request(c: connection, method: string, original_URI: string,
|
||||
|
@ -46,7 +51,7 @@ event http_request(c: connection, method: string, original_URI: string,
|
|||
{
|
||||
add c$http$tags[URI_SQLI];
|
||||
|
||||
Metrics::add_data(SQL_ATTACKER, [$host=c$id$orig_h], 1);
|
||||
Metrics::add_data(SQL_ATTACK_AGAINST, [$host=c$id$resp_h], 1);
|
||||
Metrics::add_data(SQL_ATTACKS, [$host=c$id$orig_h]);
|
||||
Metrics::add_data(SQL_ATTACKS_AGAINST, [$host=c$id$resp_h]);
|
||||
}
|
||||
}
|