Mirror of https://github.com/zeek/zeek.git, synced 2025-10-07 09:08:20 +00:00
Checkpoint, don't try running this. It's broken all over the place.

Commit 6dc204b385 (parent 8778761c07)
14 changed files with 352 additions and 379 deletions
@@ -5,24 +5,16 @@
 module Measurement;
 
 export {
-	## The metrics logging stream identifier.
-	redef enum Log::ID += { LOG };
-
-	## This is the interval for how often threshold based notices will happen
-	## after they have already fired.
-	const threshold_crossed_restart_interval = 1hr &redef;
-
 	## The various calculations are all defined as plugins.
 	type Calculation: enum {
 		PLACEHOLDER
 	};
 
-	## Represents a thing which is having metrics collected for it. An instance
-	## of this record type and an id together represent a single measurement.
-	type Index: record {
+	## Represents a thing which is having measurement results collected for it.
+	type Key: record {
 		## A non-address related metric or a sub-key for an address based metric.
 		## An example might be successful SSH connections by client IP address
-		## where the client string would be the index value.
+		## where the client string would be the key value.
 		## Another example might be number of HTTP requests to a particular
 		## value in a Host header. This is an example of a non-host based
 		## metric since multiple IP addresses could respond for the same Host
@@ -44,176 +36,152 @@ export {
 		str: string &optional;
 	};
 
-	## Value supplied when a metric is finished. It contains all
-	## of the measurements collected for the metric. Most of the
-	## fields are added by calculation plugins.
-	type ResultVal: record {
-		## The time when this result was first started.
+	type Reducer: record {
+		## Data stream identifier for the reducer to attach to.
+		stream: string;
+
+		## The calculations to perform on the data points.
+		apply: set[Calculation];
+
+		## A predicate so that you can decide per key if you would like
+		## to accept the data being inserted.
+		pred: function(key: Measurement::Key, data: Measurement::DataPoint): bool &optional;
+
+		## A function to normalize the key. This can be used to aggregate or
+		## normalize the entire key.
+		normalize_key: function(key: Measurement::Key): Key &optional;
+	};
+
+	## Value calculated for a data point stream fed into a reducer.
+	## Most of the fields are added by plugins.
+	type Result: record {
+		## The time when the first data point was added to this result value.
 		begin: time &log;
 
-		## The time when the last value was added to this result.
+		## The time when the last data point was added to this result value.
 		end: time &log;
 
 		## The number of measurements received.
 		num: count &log &default=0;
 
 		## A sample of something being measured. This is helpful in
 		## some cases for collecting information to do further detection
 		## or better logging for forensic purposes.
 		samples: vector of string &optional;
 	};
 
-	type Measurement: record {
-		## The calculations to perform on the data.
-		apply: set[Calculation];
-
-		## A predicate so that you can decide per index if you would like
-		## to accept the data being inserted.
-		pred: function(index: Measurement::Index, data: Measurement::DataPoint): bool &optional;
-
-		## A function to normalize the index. This can be used to aggregate or
-		## normalize the entire index.
-		normalize_func: function(index: Measurement::Index): Index &optional;
-
-		## A number of sample DataPoints to collect.
-		samples: count &optional;
-	};
-
-
-	type Results: record {
-		begin: time;
-		end: time;
-		result
-	};
-
-	## Type to store a table of metrics result values.
-	type ResultTable: table[Index] of Results;
+	## Type to store a table of measurement results. First table is
+	## indexed on the measurement Key and the enclosed table is
+	## indexed on the data id that the Key was relevant for.
+	type ResultTable: table[Key] of table[string] of Result;
 
-	## Filters define how the data from a metric is aggregated and handled.
-	## Filters can be used to set how often the measurements are cut
-	## and logged or how the data within them is aggregated.
-	type Filter: record {
-		## A name for the filter in case multiple filters are being
-		## applied to the same metric. In most cases the default
-		## filter name is fine and this field does not need to be set.
-		id: string;
-
-		## The interval at which this filter should be "broken" and written
-		## to the logging stream. The counters are also reset to zero at
+	type Measurement: record {
+		## The interval at which this filter should be "broken" and the
+		## callback called. The counters are also reset to zero at
 		## this time so any threshold based detection needs to be set to a
 		## number that should be expected to happen within this period.
-		every: interval;
+		epoch: interval;
 
-		## Optionally provide a function to calculate a value from the ResultVal
-		## structure which will be used for thresholding. If no function is
-		## provided, then in the following order of preference either the
-		## $unique or the $sum fields will be used.
-		threshold_val_func: function(val: Measurement::ResultVal): count &optional;
+		## The reducers for the measurement indexed by data id.
+		reducers: set[Reducer];
 
+		## Optionally provide a function to calculate a value from the Result
+		## structure which will be used for thresholding.
+		threshold_val: function(result: Measurement::Result): count &optional;
 
 		## The threshold value for calling the $threshold_crossed callback.
 		threshold: count &optional;
 
 		## A series of thresholds for calling the $threshold_crossed callback.
 		threshold_series: vector of count &optional;
 
+		## A callback that is called when a threshold is crossed.
+		threshold_crossed: function(key: Measurement::Key, result: Measurement::Result) &optional;
 
-		## A callback with the full collection of ResultVals for this filter.
+		## A callback with the full collection of Results for this filter.
 		## It's best to not access any global state outside of the variables
 		## given to the callback because there is no assurance provided as to
 		## where the callback will be executed on clusters.
 		period_finished: function(data: Measurement::ResultTable) &optional;
 
-		## A callback that is called when a threshold is crossed.
-		threshold_crossed: function(index: Measurement::Index, val: Measurement::ResultVal) &optional;
 	};
 
-	## Function to associate a metric filter with a metric ID.
-	##
-	## id: The metric ID that the filter should be associated with.
-	##
-	## filter: The record representing the filter configuration.
-	global add_filter: function(id: string, filter: Measurement::Filter);
-
+	## Create a measurement.
+	global create: function(m: Measurement::Measurement);
 
 	## Add data into a metric. This should be called when
 	## a script has measured some point value and is ready to increment the
 	## counters.
 	##
 	## id: The metric identifier that the data represents.
 	##
-	## index: The metric index that the value is to be added to.
+	## key: The metric key that the value is to be added to.
 	##
-	## increment: How much to increment the counter by.
-	global add_data: function(id: string, index: Measurement::Index, data: Measurement::DataPoint);
+	## data: The data point to send into the stream.
+	global add_data: function(id: string, key: Measurement::Key, data: Measurement::DataPoint);
 
-	## Helper function to represent a :bro:type:`Measurement::Index` value as
+	## Helper function to represent a :bro:type:`Measurement::Key` value as
 	## a simple string.
 	##
-	## index: The metric index that is to be converted into a string.
+	## key: The metric key that is to be converted into a string.
 	##
-	## Returns: A string reprentation of the metric index.
-	global index2str: function(index: Measurement::Index): string;
-
-	## Event to access metrics records as they are passed to the logging framework.
-	global log_metrics: event(rec: Measurement::Info);
+	## Returns: A string representation of the metric key.
+	global key2str: function(key: Measurement::Key): string;
 
 }
 
-redef record Filter += {
-	# Internal use only. The metric that this filter applies to. The value is automatically set.
-	id: string &optional;
+redef record Reducer += {
+	# Internal use only. Measurement ID.
+	mid: string &optional;
 };
 
-redef record ResultVal += {
-	# Internal use only. This is the queue where samples
-	# are maintained since the queue is self managing for
-	# the number of samples requested.
-	sample_queue: Queue::Queue &optional;
-
+redef record Result += {
 	# Internal use only. Indicates if a simple threshold was already crossed.
 	is_threshold_crossed: bool &default=F;
 
-	# Internal use only. Current index for threshold series.
+	# Internal use only. Current key for threshold series.
 	threshold_series_index: count &default=0;
 };
 
-# Store the filters indexed on the metric identifier and filter name.
-global filter_store: table[string, string] of Filter = table();
+redef record Measurement += {
+	# Internal use only (mostly for cluster coherency).
+	id: string &optional;
+};
 
-# This is indexed by metric id and filter name.
-global store: table[string, string] of ResultTable = table();
+# Store of reducers indexed on the data id.
+global reducer_store: table[string] of set[Reducer] = table();
 
-# This is a hook for watching thresholds being crossed. It is called whenever
-# index values are updated and the new val is given as the `val` argument.
+# Store of results indexed on the measurement id.
+global result_store: table[string] of ResultTable = table();
+
+# Store of measurements indexed on the measurement id.
+global measurement_store: table[string] of Measurement = table();
+
+# This is called whenever
+# key values are updated and the new val is given as the `val` argument.
 # It's only prototyped here because cluster and non-cluster have separate
 # implementations.
-global data_added: function(filter: Filter, index: Index, val: ResultVal);
+global data_added: function(m: Measurement, key: Key, result: Result);
 
 # Prototype the hook point for plugins to do calculations.
-global add_to_calculation: hook(filter: Filter, val: double, data: DataPoint, result: ResultVal);
-# Prototype the hook point for plugins to merge Measurements.
-global plugin_merge_measurements: hook(result: ResultVal, rv1: ResultVal, rv2: ResultVal);
+global add_to_reducer: hook(r: Reducer, val: double, data: DataPoint, result: Result);
+# Prototype the hook point for plugins to merge Results.
+global compose_resultvals_hook: hook(result: Result, rv1: Result, rv2: Result);
 
-# Event that is used to "finish" metrics and adapt the metrics
+# Event that is used to "finish" measurements and adapt the measurement
 # framework for clustered or non-clustered usage.
-global finish_period: event(filter: Measurement::Filter);
+global finish_period: event(m: Measurement);
 
-event bro_init() &priority=5
-	{
-	Log::create_stream(Measurement::LOG, [$columns=Info, $ev=log_metrics]);
-	}
 
-function index2str(index: Index): string
+function key2str(key: Key): string
 	{
 	local out = "";
-	if ( index?$host )
-		out = fmt("%shost=%s", out, index$host);
-	if ( index?$str )
-		out = fmt("%s%sstr=%s", out, |out|==0 ? "" : ", ", index$str);
-	return fmt("metric_index(%s)", out);
+	if ( key?$host )
+		out = fmt("%shost=%s", out, key$host);
+	if ( key?$str )
+		out = fmt("%s%sstr=%s", out, |out|==0 ? "" : ", ", key$str);
+	return fmt("metric_key(%s)", out);
 	}
 
-function merge_result_vals(rv1: ResultVal, rv2: ResultVal): ResultVal
+function compose_resultvals(rv1: Result, rv2: Result): Result
 	{
-	local result: ResultVal;
+	local result: Result;
 
 	# Merge $begin (take the earliest one)
 	result$begin = (rv1$begin < rv2$begin) ? rv1$begin : rv2$begin;
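For orientation, here is a minimal sketch of how the reworked API declared above is meant to be driven once the checkpoint compiles: a Reducer names the data stream and the calculations to apply to it, a Measurement bundles reducers with an epoch and optional thresholding, and scripts feed values in with add_data(). The stream name, epoch, and threshold below are hypothetical, the Key and DataPoint field names ($host, $num) are inferred from how this diff references them, and none of this code is part of the commit itself.

event bro_init()
	{
	# One reducer counting data points on a hypothetical "conn.established" stream.
	local r: Measurement::Reducer = [$stream="conn.established",
	                                 $apply=set(Measurement::PLACEHOLDER)];

	Measurement::create([$epoch=5min,
	                     $reducers=set(r),
	                     # $num is the one field every Result carries at this
	                     # checkpoint, so use it as the thresholded value.
	                     $threshold_val(result: Measurement::Result) =
	                     	{
	                     	return result$num;
	                     	},
	                     $threshold=100,
	                     $threshold_crossed(key: Measurement::Key, result: Measurement::Result) =
	                     	{
	                     	print fmt("%s crossed 100 data points this epoch", Measurement::key2str(key));
	                     	}]);
	}

event connection_established(c: connection)
	{
	# Feed one data point per established connection, keyed by originator.
	Measurement::add_data("conn.established", [$host=c$id$orig_h], [$num=1]);
	}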
@@ -224,16 +192,6 @@ function merge_result_vals(rv1: ResultVal, rv2: ResultVal): ResultVal
 	# Merge $num
 	result$num = rv1$num + rv2$num;
 
-	hook plugin_merge_measurements(result, rv1, rv2);
-
-	# Merge $sample_queue
-	if ( rv1?$sample_queue && rv2?$sample_queue )
-		result$sample_queue = Queue::merge(rv1$sample_queue, rv2$sample_queue);
-	else if ( rv1?$sample_queue )
-		result$sample_queue = rv1$sample_queue;
-	else if ( rv2?$sample_queue )
-		result$sample_queue = rv2$sample_queue;
-
 	# Merge $threshold_series_index
 	result$threshold_series_index = (rv1$threshold_series_index > rv2$threshold_series_index) ? rv1$threshold_series_index : rv2$threshold_series_index;
 
@@ -241,105 +199,103 @@ function merge_result_vals(rv1: ResultVal, rv2: ResultVal): ResultVal
 	if ( rv1$is_threshold_crossed || rv2$is_threshold_crossed )
 		result$is_threshold_crossed = T;
 
+	hook compose_resultvals_hook(result, rv1, rv2);
+
 	return result;
 	}
 
-function reset(filter: Filter)
+function reset(m: Measurement)
 	{
-	if ( [filter$id, filter$name] in store )
-		delete store[filter$id, filter$name];
+	if ( m$id in result_store )
+		delete result_store[m$id];
 
-	store[filter$id, filter$name] = table();
+	result_store[m$id] = table();
 	}
 
-function add_filter(id: string, filter: Filter)
+function create(m: Measurement)
 	{
-	if ( [id, filter$name] in store )
+	m$id=unique_id("");
+	measurement_store[m$id] = m;
+
+	for ( reducer in m$reducers )
 		{
-		Reporter::warning(fmt("invalid Metric filter (%s): Filter with same name already exists.", filter$name));
-		return;
+		reducer$mid = m$id;
+		if ( reducer$stream !in reducer_store )
+			reducer_store[reducer$stream] = set();
+		add reducer_store[reducer$stream][reducer];
 		}
 
-	if ( ! filter?$id )
-		filter$id = id;
-
-	filter_store[id, filter$name] = filter;
-	store[id, filter$name] = table();
-
-	schedule filter$every { Measurement::finish_period(filter) };
+	reset(m);
+	schedule m$epoch { Measurement::finish_period(m) };
 	}
 
-function add_data(id: string, index: Index, data: DataPoint)
+function add_data(data_id: string, key: Key, data: DataPoint)
 	{
-	# Try to add the data to all of the defined filters for the metric.
-	for ( [metric_id, filter_id] in filter_store )
+	# Try to add the data to all of the defined reducers.
+	if ( data_id !in reducer_store )
+		return;
+
+	for ( r in reducer_store[data_id] )
 		{
-		local filter = filter_store[metric_id, filter_id];
-
-		# If this filter has a predicate, run the predicate and skip this
-		# index if the predicate return false.
-		if ( filter?$pred && ! filter$pred(index, data) )
+		# If this reducer has a predicate, run the predicate
+		# and skip this key if the predicate return false.
+		if ( r?$pred && ! r$pred(key, data) )
 			next;
 
-		#if ( filter?$normalize_func )
-		#	index = filter$normalize_func(copy(index));
+		if ( r?$normalize_key )
+			key = r$normalize_key(copy(key));
 
-		local metric_tbl = store[id, filter$name];
-		if ( index !in metric_tbl )
-			metric_tbl[index] = [$begin=network_time(), $end=network_time()];
+		local m = measurement_store[r$mid];
+		local results = result_store[m$id];
+		if ( key !in results )
+			results[key] = table();
+		if ( data_id !in results[key] )
+			results[key][data_id] = [$begin=network_time(), $end=network_time()];
 
-		local result = metric_tbl[index];
+		local result = results[key][data_id];
+		++result$num;
+		# Continually update the $end field.
+		result$end=network_time();
 
 		# If a string was given, fall back to 1.0 as the value.
 		local val = 1.0;
 		if ( data?$num || data?$dbl )
			val = data?$dbl ? data$dbl : data$num;
 
-		++result$num;
-		# Continually update the $end field.
-		result$end=network_time();
-
-		#if ( filter?$samples && filter$samples > 0 && data?$str )
-		#	{
-		#	if ( ! result?$sample_queue )
-		#		result$sample_queue = Queue::init([$max_len=filter$samples]);
-		#	Queue::push(result$sample_queue, data$str);
-		#	}
-
-		hook add_to_calculation(filter, val, data, result);
-		data_added(filter, index, result);
+		hook add_to_reducer(r, val, data, result);
+		data_added(m, key, result);
 		}
 	}
 
 # This function checks if a threshold has been crossed. It is also used as a method to implement
 # mid-break-interval threshold crossing detection for cluster deployments.
-function check_thresholds(filter: Filter, index: Index, val: ResultVal, modify_pct: double): bool
+function check_thresholds(m: Measurement, key: Key, result: Result, modify_pct: double): bool
 	{
-	if ( ! (filter?$threshold || filter?$threshold_series) )
-		return;
+	if ( ! (m?$threshold || m?$threshold_series) )
+		return F;
 
 	local watch = 0.0;
-	if ( val?$unique )
-		watch = val$unique;
-	else if ( val?$sum )
-		watch = val$sum;
+	#if ( val?$unique )
+	#	watch = val$unique;
+	#else if ( val?$sum )
+	#	watch = val$sum;
 
-	if ( filter?$threshold_val_func )
-		watch = filter$threshold_val_func(val);
+	if ( m?$threshold_val )
+		watch = m$threshold_val(result);
 
 	if ( modify_pct < 1.0 && modify_pct > 0.0 )
 		watch = watch/modify_pct;
 
-	if ( ! val$is_threshold_crossed &&
-	     filter?$threshold && watch >= filter$threshold )
+	if ( ! result$is_threshold_crossed &&
+	     m?$threshold && watch >= m$threshold )
 		{
 		# A default threshold was given and the value crossed it.
 		return T;
 		}
 
-	if ( filter?$threshold_series &&
-	     |filter$threshold_series| >= val$threshold_series_index &&
-	     watch >= filter$threshold_series[val$threshold_series_index] )
+	if ( m?$threshold_series &&
+	     |m$threshold_series| >= result$threshold_series_index &&
+	     watch >= m$threshold_series[result$threshold_series_index] )
 		{
 		# A threshold series was given and the value crossed the next
 		# value in the series.
@@ -349,19 +305,19 @@ function check_thresholds(filter: Filter, index: Index, val: ResultVal, modify_p
 	return F;
 	}
 
-function threshold_crossed(filter: Filter, index: Index, val: ResultVal)
+function threshold_crossed(m: Measurement, key: Key, result: Result)
 	{
-	if ( ! filter?$threshold_crossed )
+	if ( ! m?$threshold_crossed )
 		return;
 
-	if ( val?$sample_queue )
-		val$samples = Queue::get_str_vector(val$sample_queue);
+	#if ( val?$sample_queue )
+	#	val$samples = Queue::get_str_vector(val$sample_queue);
 
-	filter$threshold_crossed(index, val);
-	val$is_threshold_crossed = T;
+	m$threshold_crossed(key, result);
+	result$is_threshold_crossed = T;
 
 	# Bump up to the next threshold series index if a threshold series is being used.
-	if ( filter?$threshold_series )
-		++val$threshold_series_index;
+	if ( m?$threshold_series )
+		++result$threshold_series_index;
 	}
 
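The thresholding path shown above is shared by both shapes of the code: check_thresholds() reduces a Result to a single watched value (via $threshold_val when provided; the $sum/$unique fallback is commented out at this checkpoint), compares it against $threshold or the next entry of $threshold_series, and threshold_crossed() then advances $threshold_series_index so each entry of a series fires at most once before the epoch resets the counters. Below is a hedged sketch of a series-based configuration and of walking the new nested ResultTable (table[Key] of table[string] of Result) in $period_finished; the stream name and numbers are made up and this code is not part of the commit.

event bro_init()
	{
	local r: Measurement::Reducer = [$stream="http.request",
	                                 $apply=set(Measurement::PLACEHOLDER)];

	Measurement::create([$epoch=1hr,
	                     $reducers=set(r),
	                     $threshold_val(result: Measurement::Result) =
	                     	{
	                     	return result$num;
	                     	},
	                     # Fires once as $num passes 10, again at 100, again at 1000.
	                     $threshold_series=vector(10, 100, 1000),
	                     $threshold_crossed(key: Measurement::Key, result: Measurement::Result) =
	                     	{
	                     	print fmt("%s passed %d data points", Measurement::key2str(key), result$num);
	                     	},
	                     # The new ResultTable is nested: keyed first on the Key, then on the
	                     # data id ("http.request" here) that produced the result.
	                     $period_finished(data: Measurement::ResultTable) =
	                     	{
	                     	for ( key in data )
	                     		for ( data_id in data[key] )
	                     			print fmt("%s / %s: %d data points",
	                     			          Measurement::key2str(key), data_id,
	                     			          data[key][data_id]$num);
	                     	}]);
	}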