From 8778761c07e3d9009dd1e619a9e82bd525285d4a Mon Sep 17 00:00:00 2001 From: Seth Hall Date: Wed, 13 Mar 2013 22:55:03 -0400 Subject: [PATCH] Checkpoint --- doc/scripts/DocSourcesList.cmake | 6 +- .../{metrics => measurement}/__load__.bro | 2 + .../{metrics => measurement}/cluster.bro | 60 +- scripts/base/frameworks/measurement/main.bro | 367 ++++++++++ .../frameworks/measurement/non-cluster.bro | 21 + .../measurement/plugins/__load__.bro | 7 + .../measurement/plugins/average.bro | 35 + .../frameworks/measurement/plugins/max.bro | 37 + .../frameworks/measurement/plugins/min.bro | 35 + .../measurement/plugins/std-dev.bro | 36 + .../frameworks/measurement/plugins/sum.bro | 35 + .../frameworks/measurement/plugins/unique.bro | 51 ++ .../measurement/plugins/variance.bro | 65 ++ .../base/frameworks/measurement/simple.bro | 6 + scripts/base/frameworks/metrics/main.bro | 664 ------------------ .../base/frameworks/metrics/non-cluster.bro | 37 - scripts/base/init-default.bro | 2 +- scripts/base/protocols/ssh/main.bro | 108 ++- .../frameworks/metrics/conn-example.bro | 2 +- .../frameworks/metrics/http-example.bro | 2 +- .../policy/frameworks/metrics/ssl-example.bro | 2 +- scripts/policy/misc/app-metrics.bro | 68 +- .../policy/misc/detect-traceroute/main.bro | 2 +- scripts/policy/misc/scan.bro | 10 +- .../protocols/conn/conn-stats-per-host.bro | 2 +- scripts/policy/protocols/conn/metrics.bro | 2 +- .../protocols/ftp/detect-bruteforcing.bro | 7 +- scripts/policy/protocols/http/detect-sqli.bro | 2 +- scripts/policy/protocols/smtp/metrics.bro | 2 +- .../protocols/ssh/detect-bruteforcing.bro | 6 +- 30 files changed, 833 insertions(+), 848 deletions(-) rename scripts/base/frameworks/{metrics => measurement}/__load__.bro (93%) rename scripts/base/frameworks/{metrics => measurement}/cluster.bro (83%) create mode 100644 scripts/base/frameworks/measurement/main.bro create mode 100644 scripts/base/frameworks/measurement/non-cluster.bro create mode 100644 
scripts/base/frameworks/measurement/plugins/__load__.bro create mode 100644 scripts/base/frameworks/measurement/plugins/average.bro create mode 100644 scripts/base/frameworks/measurement/plugins/max.bro create mode 100644 scripts/base/frameworks/measurement/plugins/min.bro create mode 100644 scripts/base/frameworks/measurement/plugins/std-dev.bro create mode 100644 scripts/base/frameworks/measurement/plugins/sum.bro create mode 100644 scripts/base/frameworks/measurement/plugins/unique.bro create mode 100644 scripts/base/frameworks/measurement/plugins/variance.bro create mode 100644 scripts/base/frameworks/measurement/simple.bro delete mode 100644 scripts/base/frameworks/metrics/main.bro delete mode 100644 scripts/base/frameworks/metrics/non-cluster.bro diff --git a/doc/scripts/DocSourcesList.cmake b/doc/scripts/DocSourcesList.cmake index 5ce7221909..4e957d03a0 100644 --- a/doc/scripts/DocSourcesList.cmake +++ b/doc/scripts/DocSourcesList.cmake @@ -46,9 +46,9 @@ rest_target(${psd} base/frameworks/logging/writers/ascii.bro) rest_target(${psd} base/frameworks/logging/writers/dataseries.bro) rest_target(${psd} base/frameworks/logging/writers/elasticsearch.bro) rest_target(${psd} base/frameworks/logging/writers/none.bro) -rest_target(${psd} base/frameworks/metrics/cluster.bro) -rest_target(${psd} base/frameworks/metrics/main.bro) -rest_target(${psd} base/frameworks/metrics/non-cluster.bro) +rest_target(${psd} base/frameworks/measurement/cluster.bro) +rest_target(${psd} base/frameworks/measurement/main.bro) +rest_target(${psd} base/frameworks/measurement/non-cluster.bro) rest_target(${psd} base/frameworks/notice/actions/add-geodata.bro) rest_target(${psd} base/frameworks/notice/actions/drop.bro) rest_target(${psd} base/frameworks/notice/actions/email_admin.bro) diff --git a/scripts/base/frameworks/metrics/__load__.bro b/scripts/base/frameworks/measurement/__load__.bro similarity index 93% rename from scripts/base/frameworks/metrics/__load__.bro rename to 
scripts/base/frameworks/measurement/__load__.bro index 35f6b30fb5..fc784e1632 100644 --- a/scripts/base/frameworks/metrics/__load__.bro +++ b/scripts/base/frameworks/measurement/__load__.bro @@ -1,5 +1,7 @@ @load ./main +@load ./plugins + # The cluster framework must be loaded first. @load base/frameworks/cluster diff --git a/scripts/base/frameworks/metrics/cluster.bro b/scripts/base/frameworks/measurement/cluster.bro similarity index 83% rename from scripts/base/frameworks/metrics/cluster.bro rename to scripts/base/frameworks/measurement/cluster.bro index 721f2a212e..6ccf5bb2f9 100644 --- a/scripts/base/frameworks/metrics/cluster.bro +++ b/scripts/base/frameworks/measurement/cluster.bro @@ -7,7 +7,7 @@ @load base/frameworks/cluster @load ./main -module Metrics; +module Measurement; export { ## Allows a user to decide how large of result groups the @@ -48,13 +48,13 @@ export { global cluster_index_request: event(uid: string, id: string, filter_name: string, index: Index); # This event is sent by nodes in response to a - # :bro:id:`Metrics::cluster_index_request` event. + # :bro:id:`Measurement::cluster_index_request` event. global cluster_index_response: event(uid: string, id: string, filter_name: string, index: Index, val: ResultVal); # This is sent by workers to indicate that they crossed the percent of the # current threshold by the percentage defined globally in - # :bro:id:`Metrics::cluster_request_global_view_percent` - global cluster_index_intermediate_response: event(id: string, filter_name: string, index: Metrics::Index); + # :bro:id:`Measurement::cluster_request_global_view_percent` + global cluster_index_intermediate_response: event(id: string, filter_name: string, index: Measurement::Index); # This event is scheduled internally on workers to send result chunks. global send_data: event(uid: string, id: string, filter_name: string, data: MetricTable); @@ -62,8 +62,8 @@ export { # Add events to the cluster framework to make this work. 
-redef Cluster::manager2worker_events += /Metrics::cluster_(filter_request|index_request)/; -redef Cluster::worker2manager_events += /Metrics::cluster_(filter_response|index_response|index_intermediate_response)/; +redef Cluster::manager2worker_events += /Measurement::cluster_(filter_request|index_request)/; +redef Cluster::worker2manager_events += /Measurement::cluster_(filter_response|index_response|index_intermediate_response)/; @if ( Cluster::local_node_type() != Cluster::MANAGER ) # This variable is maintained to know what indexes they have recently sent as @@ -88,12 +88,12 @@ function data_added(filter: Filter, index: Index, val: ResultVal) check_thresholds(filter, index, val, cluster_request_global_view_percent) ) { # kick off intermediate update - event Metrics::cluster_index_intermediate_response(filter$id, filter$name, index); + event Measurement::cluster_index_intermediate_response(filter$id, filter$name, index); ++recent_global_view_indexes[filter$id, filter$name, index]; } } -event Metrics::send_data(uid: string, id: string, filter_name: string, data: MetricTable) +event Measurement::send_data(uid: string, id: string, filter_name: string, data: MetricTable) { #print fmt("WORKER %s: sending data for uid %s...", Cluster::node, uid); @@ -115,35 +115,35 @@ event Metrics::send_data(uid: string, id: string, filter_name: string, data: Met if ( |data| == 0 ) done = T; - event Metrics::cluster_filter_response(uid, id, filter_name, local_data, done); + event Measurement::cluster_filter_response(uid, id, filter_name, local_data, done); if ( ! 
done ) - event Metrics::send_data(uid, id, filter_name, data); + event Measurement::send_data(uid, id, filter_name, data); } -event Metrics::cluster_filter_request(uid: string, id: string, filter_name: string) +event Measurement::cluster_filter_request(uid: string, id: string, filter_name: string) { #print fmt("WORKER %s: received the cluster_filter_request event for %s.", Cluster::node, id); # Initiate sending all of the data for the requested filter. - event Metrics::send_data(uid, id, filter_name, store[id, filter_name]); + event Measurement::send_data(uid, id, filter_name, store[id, filter_name]); # Lookup the actual filter and reset it, the reference to the data # currently stored will be maintained internally by the send_data event. reset(filter_store[id, filter_name]); } -event Metrics::cluster_index_request(uid: string, id: string, filter_name: string, index: Index) +event Measurement::cluster_index_request(uid: string, id: string, filter_name: string, index: Index) { if ( [id, filter_name] in store && index in store[id, filter_name] ) { #print fmt("WORKER %s: received the cluster_index_request event for %s=%s.", Cluster::node, index2str(index), data); - event Metrics::cluster_index_response(uid, id, filter_name, index, store[id, filter_name][index]); + event Measurement::cluster_index_response(uid, id, filter_name, index, store[id, filter_name][index]); } else { # We need to send an empty response if we don't have the data so that the manager # can know that it heard back from all of the workers. - event Metrics::cluster_index_response(uid, id, filter_name, index, [$begin=network_time(), $end=network_time()]); + event Measurement::cluster_index_response(uid, id, filter_name, index, [$begin=network_time(), $end=network_time()]); } } @@ -177,7 +177,7 @@ global index_requests: table[string, string, string, Index] of ResultVal &read_e global outstanding_global_views: table[string, string] of count &default=0; # Managers handle logging. 
-event Metrics::finish_period(filter: Filter) +event Measurement::finish_period(filter: Filter) { #print fmt("%.6f MANAGER: breaking %s filter for %s metric", network_time(), filter$name, filter$id); local uid = unique_id(""); @@ -189,9 +189,9 @@ event Metrics::finish_period(filter: Filter) filter_results[uid, filter$id, filter$name] = table(); # Request data from peers. - event Metrics::cluster_filter_request(uid, filter$id, filter$name); + event Measurement::cluster_filter_request(uid, filter$id, filter$name); # Schedule the next finish_period event. - schedule filter$every { Metrics::finish_period(filter) }; + schedule filter$every { Measurement::finish_period(filter) }; } # This is unlikely to be called often, but it's here in case there are metrics @@ -202,7 +202,7 @@ function data_added(filter: Filter, index: Index, val: ResultVal) threshold_crossed(filter, index, val); } -event Metrics::cluster_index_response(uid: string, id: string, filter_name: string, index: Index, val: ResultVal) +event Measurement::cluster_index_response(uid: string, id: string, filter_name: string, index: Index, val: ResultVal) { #print fmt("%0.6f MANAGER: receiving index data from %s - %s=%s", network_time(), get_event_peer()$descr, index2str(index), val); @@ -233,7 +233,7 @@ event Metrics::cluster_index_response(uid: string, id: string, filter_name: stri } # Managers handle intermediate updates here. 
-event Metrics::cluster_index_intermediate_response(id: string, filter_name: string, index: Index) +event Measurement::cluster_index_intermediate_response(id: string, filter_name: string, index: Index) { #print fmt("MANAGER: receiving intermediate index data from %s", get_event_peer()$descr); #print fmt("MANAGER: requesting index data for %s", index2str(index)); @@ -250,10 +250,10 @@ event Metrics::cluster_index_intermediate_response(id: string, filter_name: stri ++outstanding_global_views[id, filter_name]; local uid = unique_id(""); - event Metrics::cluster_index_request(uid, id, filter_name, index); + event Measurement::cluster_index_request(uid, id, filter_name, index); } -event Metrics::cluster_filter_response(uid: string, id: string, filter_name: string, data: MetricTable, done: bool) +event Measurement::cluster_filter_response(uid: string, id: string, filter_name: string, data: MetricTable, done: bool) { #print fmt("MANAGER: receiving results from %s", get_event_peer()$descr); @@ -294,22 +294,6 @@ event Metrics::cluster_filter_response(uid: string, id: string, filter_name: str delete requested_results[uid]; } - if ( filter?$rollup ) - { - for ( index in local_data ) - { - if ( index !in rollup_store ) - rollup_store[index] = table(); - rollup_store[index][id, filter_name] = local_data[index]; - - # If all of the result vals are stored then the rollup callback can be executed. - if ( |rollup_store[index]| == |rollups[filter$rollup]$filters| ) - { - rollups[filter$rollup]$callback(index, rollup_store[index]); - } - } - } - if ( filter?$period_finished ) filter$period_finished(ts, filter$id, filter$name, local_data); diff --git a/scripts/base/frameworks/measurement/main.bro b/scripts/base/frameworks/measurement/main.bro new file mode 100644 index 0000000000..3809fb16cc --- /dev/null +++ b/scripts/base/frameworks/measurement/main.bro @@ -0,0 +1,367 @@ +##! The metrics framework provides a way to count and measure data. 
+ +@load base/utils/queue + +module Measurement; + +export { + ## The metrics logging stream identifier. + redef enum Log::ID += { LOG }; + + ## This is the interval for how often threshold based notices will happen + ## after they have already fired. + const threshold_crossed_restart_interval = 1hr &redef; + + ## The various calculations are all defined as plugins. + type Calculation: enum { + PLACEHOLDER + }; + + ## Represents a thing which is having metrics collected for it. An instance + ## of this record type and an id together represent a single measurement. + type Index: record { + ## A non-address related metric or a sub-key for an address based metric. + ## An example might be successful SSH connections by client IP address + ## where the client string would be the index value. + ## Another example might be number of HTTP requests to a particular + ## value in a Host header. This is an example of a non-host based + ## metric since multiple IP addresses could respond for the same Host + ## header value. + str: string &optional; + + ## Host is the value to which this metric applies. + host: addr &optional; + } &log; + + ## Represents data being added for a single metric data point. + ## Only supply a single value here at a time. + type DataPoint: record { + ## Count value. + num: count &optional; + ## Double value. + dbl: double &optional; + ## String value. + str: string &optional; + }; + + ## Value supplied when a metric is finished. It contains all + ## of the measurements collected for the metric. Most of the + ## fields are added by calculation plugins. + type ResultVal: record { + ## The time when this result was first started. + begin: time &log; + + ## The time when the last value was added to this result. + end: time &log; + + ## The number of measurements received. + num: count &log &default=0; + + ## A sample of something being measured. 
This is helpful in + ## some cases for collecting information to do further detection + ## or better logging for forensic purposes. + samples: vector of string &optional; + }; + + type Measurement: record { + ## The calculations to perform on the data. + apply: set[Calculation]; + + ## A predicate so that you can decide per index if you would like + ## to accept the data being inserted. + pred: function(index: Measurement::Index, data: Measurement::DataPoint): bool &optional; + + ## A function to normalize the index. This can be used to aggregate or + ## normalize the entire index. + normalize_func: function(index: Measurement::Index): Index &optional; + + ## A number of sample DataPoints to collect. + samples: count &optional; + }; + + + type Results: record { + begin: time; + end: time; + result + }; + + ## Type to store a table of metrics result values. + type ResultTable: table[Index] of Results; + + ## Filters define how the data from a metric is aggregated and handled. + ## Filters can be used to set how often the measurements are cut + ## and logged or how the data within them is aggregated. + type Filter: record { + ## A name for the filter in case multiple filters are being + ## applied to the same metric. In most cases the default + ## filter name is fine and this field does not need to be set. + id: string; + + ## The interval at which this filter should be "broken" and written + ## to the logging stream. The counters are also reset to zero at + ## this time so any threshold based detection needs to be set to a + ## number that should be expected to happen within this period. + every: interval; + + ## Optionally provide a function to calculate a value from the ResultVal + ## structure which will be used for thresholding. If no function is + ## provided, then in the following order of preference either the + ## $unique or the $sum fields will be used. 
+ threshold_val_func: function(val: Measurement::ResultVal): count &optional; + + ## The threshold value for calling the $threshold_crossed callback. + threshold: count &optional; + + ## A series of thresholds for calling the $threshold_crossed callback. + threshold_series: vector of count &optional; + + ## A callback with the full collection of ResultVals for this filter. + ## It's best to not access any global state outside of the variables + ## given to the callback because there is no assurance provided as to + ## where the callback will be executed on clusters. + period_finished: function(data: Measurement::ResultTable) &optional; + + ## A callback that is called when a threshold is crossed. + threshold_crossed: function(index: Measurement::Index, val: Measurement::ResultVal) &optional; + }; + + ## Function to associate a metric filter with a metric ID. + ## + ## id: The metric ID that the filter should be associated with. + ## + ## filter: The record representing the filter configuration. + global add_filter: function(id: string, filter: Measurement::Filter); + + ## Add data into a metric. This should be called when + ## a script has measured some point value and is ready to increment the + ## counters. + ## + ## id: The metric identifier that the data represents. + ## + ## index: The metric index that the value is to be added to. + ## + ## increment: How much to increment the counter by. + global add_data: function(id: string, index: Measurement::Index, data: Measurement::DataPoint); + + ## Helper function to represent a :bro:type:`Measurement::Index` value as + ## a simple string. + ## + ## index: The metric index that is to be converted into a string. + ## + ## Returns: A string reprentation of the metric index. + global index2str: function(index: Measurement::Index): string; + + ## Event to access metrics records as they are passed to the logging framework. 
+ global log_metrics: event(rec: Measurement::Info); + +} + +redef record Filter += { + # Internal use only. The metric that this filter applies to. The value is automatically set. + id: string &optional; +}; + +redef record ResultVal += { + # Internal use only. This is the queue where samples + # are maintained since the queue is self managing for + # the number of samples requested. + sample_queue: Queue::Queue &optional; + + # Internal use only. Indicates if a simple threshold was already crossed. + is_threshold_crossed: bool &default=F; + + # Internal use only. Current index for threshold series. + threshold_series_index: count &default=0; +}; + +# Store the filters indexed on the metric identifier and filter name. +global filter_store: table[string, string] of Filter = table(); + +# This is indexed by metric id and filter name. +global store: table[string, string] of ResultTable = table(); + +# This is a hook for watching thresholds being crossed. It is called whenever +# index values are updated and the new val is given as the `val` argument. +# It's only prototyped here because cluster and non-cluster have separate +# implementations. +global data_added: function(filter: Filter, index: Index, val: ResultVal); + +# Prototype the hook point for plugins to do calculations. +global add_to_calculation: hook(filter: Filter, val: double, data: DataPoint, result: ResultVal); +# Prototype the hook point for plugins to merge Measurements. +global plugin_merge_measurements: hook(result: ResultVal, rv1: ResultVal, rv2: ResultVal); + +# Event that is used to "finish" metrics and adapt the metrics +# framework for clustered or non-clustered usage. 
+global finish_period: event(filter: Measurement::Filter); + +event bro_init() &priority=5 + { + Log::create_stream(Measurement::LOG, [$columns=Info, $ev=log_metrics]); + } + +function index2str(index: Index): string + { + local out = ""; + if ( index?$host ) + out = fmt("%shost=%s", out, index$host); + if ( index?$str ) + out = fmt("%s%sstr=%s", out, |out|==0 ? "" : ", ", index$str); + return fmt("metric_index(%s)", out); + } + +function merge_result_vals(rv1: ResultVal, rv2: ResultVal): ResultVal + { + local result: ResultVal; + + # Merge $begin (take the earliest one) + result$begin = (rv1$begin < rv2$begin) ? rv1$begin : rv2$begin; + + # Merge $end (take the latest one) + result$end = (rv1$end > rv2$end) ? rv1$end : rv2$end; + + # Merge $num + result$num = rv1$num + rv2$num; + + hook plugin_merge_measurements(result, rv1, rv2); + + # Merge $sample_queue + if ( rv1?$sample_queue && rv2?$sample_queue ) + result$sample_queue = Queue::merge(rv1$sample_queue, rv2$sample_queue); + else if ( rv1?$sample_queue ) + result$sample_queue = rv1$sample_queue; + else if ( rv2?$sample_queue ) + result$sample_queue = rv2$sample_queue; + + # Merge $threshold_series_index + result$threshold_series_index = (rv1$threshold_series_index > rv2$threshold_series_index) ? rv1$threshold_series_index : rv2$threshold_series_index; + + # Merge $is_threshold_crossed + if ( rv1$is_threshold_crossed || rv2$is_threshold_crossed ) + result$is_threshold_crossed = T; + + return result; + } + +function reset(filter: Filter) + { + if ( [filter$id, filter$name] in store ) + delete store[filter$id, filter$name]; + + store[filter$id, filter$name] = table(); + } + +function add_filter(id: string, filter: Filter) + { + if ( [id, filter$name] in store ) + { + Reporter::warning(fmt("invalid Metric filter (%s): Filter with same name already exists.", filter$name)); + return; + } + + if ( ! 
filter?$id ) + filter$id = id; + + filter_store[id, filter$name] = filter; + store[id, filter$name] = table(); + + schedule filter$every { Measurement::finish_period(filter) }; + } + +function add_data(id: string, index: Index, data: DataPoint) + { + # Try to add the data to all of the defined filters for the metric. + for ( [metric_id, filter_id] in filter_store ) + { + local filter = filter_store[metric_id, filter_id]; + + # If this filter has a predicate, run the predicate and skip this + # index if the predicate return false. + if ( filter?$pred && ! filter$pred(index, data) ) + next; + + #if ( filter?$normalize_func ) + # index = filter$normalize_func(copy(index)); + + local metric_tbl = store[id, filter$name]; + if ( index !in metric_tbl ) + metric_tbl[index] = [$begin=network_time(), $end=network_time()]; + + local result = metric_tbl[index]; + + # If a string was given, fall back to 1.0 as the value. + local val = 1.0; + if ( data?$num || data?$dbl ) + val = data?$dbl ? data$dbl : data$num; + + ++result$num; + # Continually update the $end field. + result$end=network_time(); + + #if ( filter?$samples && filter$samples > 0 && data?$str ) + # { + # if ( ! result?$sample_queue ) + # result$sample_queue = Queue::init([$max_len=filter$samples]); + # Queue::push(result$sample_queue, data$str); + # } + + hook add_to_calculation(filter, val, data, result); + data_added(filter, index, result); + } + } + +# This function checks if a threshold has been crossed. It is also used as a method to implement +# mid-break-interval threshold crossing detection for cluster deployments. +function check_thresholds(filter: Filter, index: Index, val: ResultVal, modify_pct: double): bool + { + if ( ! 
(filter?$threshold || filter?$threshold_series) ) + return; + + local watch = 0.0; + if ( val?$unique ) + watch = val$unique; + else if ( val?$sum ) + watch = val$sum; + + if ( filter?$threshold_val_func ) + watch = filter$threshold_val_func(val); + + if ( modify_pct < 1.0 && modify_pct > 0.0 ) + watch = watch/modify_pct; + + if ( ! val$is_threshold_crossed && + filter?$threshold && watch >= filter$threshold ) + { + # A default threshold was given and the value crossed it. + return T; + } + + if ( filter?$threshold_series && + |filter$threshold_series| >= val$threshold_series_index && + watch >= filter$threshold_series[val$threshold_series_index] ) + { + # A threshold series was given and the value crossed the next + # value in the series. + return T; + } + + return F; + } + +function threshold_crossed(filter: Filter, index: Index, val: ResultVal) + { + if ( ! filter?$threshold_crossed ) + return; + + if ( val?$sample_queue ) + val$samples = Queue::get_str_vector(val$sample_queue); + + filter$threshold_crossed(index, val); + val$is_threshold_crossed = T; + + # Bump up to the next threshold series index if a threshold series is being used. 
+ if ( filter?$threshold_series ) + ++val$threshold_series_index; + } + diff --git a/scripts/base/frameworks/measurement/non-cluster.bro b/scripts/base/frameworks/measurement/non-cluster.bro new file mode 100644 index 0000000000..11bb7f16dc --- /dev/null +++ b/scripts/base/frameworks/measurement/non-cluster.bro @@ -0,0 +1,21 @@ +@load ./main + +module Measurement; + +event Measurement::finish_period(filter: Filter) + { + local data = store[filter$id, filter$name]; + if ( filter?$period_finished ) + filter$period_finished(network_time(), filter$id, filter$name, data); + + reset(filter); + + schedule filter$every { Measurement::finish_period(filter) }; + } + + +function data_added(filter: Filter, index: Index, val: ResultVal) + { + if ( check_thresholds(filter, index, val, 1.0) ) + threshold_crossed(filter, index, val); + } diff --git a/scripts/base/frameworks/measurement/plugins/__load__.bro b/scripts/base/frameworks/measurement/plugins/__load__.bro new file mode 100644 index 0000000000..b708f917d1 --- /dev/null +++ b/scripts/base/frameworks/measurement/plugins/__load__.bro @@ -0,0 +1,7 @@ +@load ./average +@load ./max +@load ./min +@load ./std-dev +@load ./sum +@load ./unique +@load ./variance \ No newline at end of file diff --git a/scripts/base/frameworks/measurement/plugins/average.bro b/scripts/base/frameworks/measurement/plugins/average.bro new file mode 100644 index 0000000000..d3e1bef4d5 --- /dev/null +++ b/scripts/base/frameworks/measurement/plugins/average.bro @@ -0,0 +1,35 @@ + +module Metrics; + +export { + redef enum Calculation += { + ## Calculate the average of the values. + AVERAGE + }; + + redef record ResultVal += { + ## For numeric data, this calculates the average of all values. + average: double &log &optional; + }; +} + +hook add_to_calculation(filter: Filter, val: double, data: DataPoint, result: ResultVal) + { + if ( AVERAGE in filter$measure ) + { + if ( ! 
result?$average ) + result$average = val; + else + result$average += (val - result$average) / result$num; + } + } + +hook plugin_merge_measurements(result: ResultVal, rv1: ResultVal, rv2: ResultVal) + { + if ( rv1?$average && rv2?$average ) + result$average = ((rv1$average*rv1$num) + (rv2$average*rv2$num))/(rv1$num+rv2$num); + else if ( rv1?$average ) + result$average = rv1$average; + else if ( rv2?$average ) + result$average = rv2$average; + } \ No newline at end of file diff --git a/scripts/base/frameworks/measurement/plugins/max.bro b/scripts/base/frameworks/measurement/plugins/max.bro new file mode 100644 index 0000000000..806713dbd4 --- /dev/null +++ b/scripts/base/frameworks/measurement/plugins/max.bro @@ -0,0 +1,37 @@ + +module Metrics; + +export { + redef enum Calculation += { + ## Find the maximum value. + MAX + }; + + redef record ResultVal += { + ## For numeric data, this tracks the maximum value given. + max: double &log &optional; + }; +} + +hook add_to_calculation(filter: Filter, val: double, data: DataPoint, result: ResultVal) + { + if ( MAX in filter$measure ) + { + if ( ! result?$max ) + result$max = val; + else if ( val > result$max ) + result$max = val; + } + } + +hook plugin_merge_measurements(result: ResultVal, rv1: ResultVal, rv2: ResultVal) + { + if ( rv1?$max && rv2?$max ) + result$max = (rv1$max > rv2$max) ? rv1$max : rv2$max; + else if ( rv1?$max ) + result$max = rv1$max; + else if ( rv2?$max ) + result$max = rv2$max; + } + + diff --git a/scripts/base/frameworks/measurement/plugins/min.bro b/scripts/base/frameworks/measurement/plugins/min.bro new file mode 100644 index 0000000000..e0d4003b31 --- /dev/null +++ b/scripts/base/frameworks/measurement/plugins/min.bro @@ -0,0 +1,35 @@ + +module Metrics; + +export { + redef enum Calculation += { + ## Find the minimum value. + MIN + }; + + redef record ResultVal += { + ## For numeric data, this tracks the minimum value given. 
+ min: double &log &optional; + }; +} + +hook add_to_calculation(filter: Filter, val: double, data: DataPoint, result: ResultVal) + { + if ( MIN in filter$measure ) + { + if ( ! result?$min ) + result$min = val; + else if ( val < result$min ) + result$min = val; + } + } + +hook plugin_merge_measurements(result: ResultVal, rv1: ResultVal, rv2: ResultVal) + { + if ( rv1?$min && rv2?$min ) + result$min = (rv1$min < rv2$min) ? rv1$min : rv2$min; + else if ( rv1?$min ) + result$min = rv1$min; + else if ( rv2?$min ) + result$min = rv2$min; + } \ No newline at end of file diff --git a/scripts/base/frameworks/measurement/plugins/std-dev.bro b/scripts/base/frameworks/measurement/plugins/std-dev.bro new file mode 100644 index 0000000000..cbd0db3416 --- /dev/null +++ b/scripts/base/frameworks/measurement/plugins/std-dev.bro @@ -0,0 +1,36 @@ +@load ./sum +@load ./variance + +module Metrics; + +export { + redef enum Calculation += { + ## Find the standard deviation of the values. + STD_DEV + }; + + redef record ResultVal += { + ## For numeric data, this calculates the standard deviation. + std_dev: double &log &optional; + }; +} + +# This depends on the variance plugin which uses priority -5 +hook add_to_calculation(filter: Filter, val: double, data: DataPoint, result: ResultVal) &priority=-10 + { + if ( STD_DEV in filter$measure ) + { + if ( result?$variance ) + result$std_dev = sqrt(result$variance); + } + } + +hook plugin_merge_measurements(result: ResultVal, rv1: ResultVal, rv2: ResultVal) &priority=-10 + { + if ( rv1?$sum || rv2?$sum ) + { + result$sum = rv1?$sum ? 
rv1$sum : 0; + if ( rv2?$sum ) + result$sum += rv2$sum; + } + } \ No newline at end of file diff --git a/scripts/base/frameworks/measurement/plugins/sum.bro b/scripts/base/frameworks/measurement/plugins/sum.bro new file mode 100644 index 0000000000..2f615ffb6c --- /dev/null +++ b/scripts/base/frameworks/measurement/plugins/sum.bro @@ -0,0 +1,35 @@ + +module Metrics; + +export { + redef enum Calculation += { + ## Sums the values given. For string values, + ## this will be the number of strings given. + SUM + }; + + redef record ResultVal += { + ## For numeric data, this tracks the sum of all values. + sum: double &log &optional; + }; +} + +hook add_to_calculation(filter: Filter, val: double, data: DataPoint, result: ResultVal) + { + if ( SUM in filter$measure ) + { + if ( ! result?$sum ) + result$sum = 0; + result$sum += val; + } + } + +hook plugin_merge_measurements(result: ResultVal, rv1: ResultVal, rv2: ResultVal) + { + if ( rv1?$sum || rv2?$sum ) + { + result$sum = rv1?$sum ? rv1$sum : 0; + if ( rv2?$sum ) + result$sum += rv2$sum; + } + } \ No newline at end of file diff --git a/scripts/base/frameworks/measurement/plugins/unique.bro b/scripts/base/frameworks/measurement/plugins/unique.bro new file mode 100644 index 0000000000..66cab47897 --- /dev/null +++ b/scripts/base/frameworks/measurement/plugins/unique.bro @@ -0,0 +1,51 @@ + +module Metrics; + +export { + redef enum Calculation += { + ## Calculate the number of unique values. + UNIQUE + }; + + redef record ResultVal += { + ## If cardinality is being tracked, the number of unique + ## items is tracked here. + unique: count &log &optional; + }; +} + +redef record ResultVal += { + # Internal use only. This is not meant to be publically available + # because we don't want to trust that we can inspect the values + # since we will like move to a probalistic data structure in the future. 
+	# TODO: in the future this will optionally be a hyperloglog structure
+	unique_vals: set[DataPoint] &optional;
+};
+
+hook add_to_calculation(filter: Filter, val: double, data: DataPoint, result: ResultVal)
+	{
+	if ( UNIQUE in filter$measure )
+		{
+		if ( ! result?$unique_vals )
+			result$unique_vals=set();
+		add result$unique_vals[data];
+		}
+	}
+
+hook plugin_merge_measurements(result: ResultVal, rv1: ResultVal, rv2: ResultVal)
+	{
+	if ( rv1?$unique_vals || rv2?$unique_vals )
+		{
+		if ( rv1?$unique_vals )
+			result$unique_vals = rv1$unique_vals;
+
+		if ( rv2?$unique_vals )
+			if ( ! result?$unique_vals )
+				result$unique_vals = rv2$unique_vals;
+			else
+				for ( val2 in rv2$unique_vals )
+					add result$unique_vals[val2];
+
+		result$unique = |result$unique_vals|;
+		}
+	}
\ No newline at end of file
diff --git a/scripts/base/frameworks/measurement/plugins/variance.bro b/scripts/base/frameworks/measurement/plugins/variance.bro
new file mode 100644
index 0000000000..df83361c35
--- /dev/null
+++ b/scripts/base/frameworks/measurement/plugins/variance.bro
@@ -0,0 +1,65 @@
+@load ./average
+
+module Metrics;
+
+export {
+	redef enum Calculation += {
+		## Find the variance of the values.
+		VARIANCE
+	};
+
+	redef record ResultVal += {
+		## For numeric data, this calculates the variance.
+		variance: double &log &optional;
+	};
+}
+
+redef record ResultVal += {
+	# Internal use only. Used for incrementally calculating variance.
+	prev_avg: double &optional;
+
+	# Internal use only. For calculating incremental variance.
+	var_s: double &optional;
+};
+
+hook add_to_calculation(filter: Filter, val: double, data: DataPoint, result: ResultVal) &priority=5
+	{
+	if ( VARIANCE in filter$measure && result?$average )
+		result$prev_avg = result$average;
+	}
+
+# Reduced priority since this depends on the average
+hook add_to_calculation(filter: Filter, val: double, data: DataPoint, result: ResultVal) &priority=-5
+	{
+	if ( VARIANCE in filter$measure )
+		{
+		if ( ! result?$var_s )
+			result$var_s = 0.0;
+		result$var_s += result?$prev_avg ? (val - result$prev_avg) * (val - result$average) : 0.0;
+		result$variance = (result$num > 1) ? result$var_s/result$num : 0.0;
+		}
+	}
+
+# Reduced priority since this depends on the average
+hook plugin_merge_measurements(result: ResultVal, rv1: ResultVal, rv2: ResultVal) &priority=-5
+	{
+	if ( rv1?$var_s && rv2?$var_s )
+		{
+		local rv1_avg_sq = (rv1$average - result$average)*(rv1$average - result$average);
+		local rv2_avg_sq = (rv2$average - result$average)*(rv2$average - result$average);
+		result$var_s = rv1$num*(rv1$var_s/rv1$num + rv1_avg_sq) + rv2$num*(rv2$var_s/rv2$num + rv2_avg_sq);
+		}
+	else if ( rv1?$var_s )
+		result$var_s = rv1$var_s;
+	else if ( rv2?$var_s )
+		result$var_s = rv2$var_s;
+
+	if ( rv1?$prev_avg && rv2?$prev_avg )
+		result$prev_avg = ((rv1$prev_avg*rv1$num) + (rv2$prev_avg*rv2$num))/(rv1$num+rv2$num);
+	else if ( rv1?$prev_avg )
+		result$prev_avg = rv1$prev_avg;
+	else if ( rv2?$prev_avg )
+		result$prev_avg = rv2$prev_avg;
+	if ( result?$var_s )
+		result$variance = (result$num > 1) ? result$var_s/result$num : 0.0;
+	}
\ No newline at end of file
diff --git a/scripts/base/frameworks/measurement/simple.bro b/scripts/base/frameworks/measurement/simple.bro
new file mode 100644
index 0000000000..51bf7e8c44
--- /dev/null
+++ b/scripts/base/frameworks/measurement/simple.bro
@@ -0,0 +1,6 @@
+
+module Metrics;
+
+export {
+
+}
\ No newline at end of file
diff --git a/scripts/base/frameworks/metrics/main.bro b/scripts/base/frameworks/metrics/main.bro
deleted file mode 100644
index 6101d1cf4a..0000000000
--- a/scripts/base/frameworks/metrics/main.bro
+++ /dev/null
@@ -1,664 +0,0 @@
-##! The metrics framework provides a way to count and measure data.
-
-@load base/utils/queue
-
-module Metrics;
-
-export {
-	## The metrics logging stream identifier.
-	redef enum Log::ID += { LOG };
-
-	## This is the interval for how often threshold based notices will happen
-	## after they have already fired.
- const threshold_crossed_restart_interval = 1hr &redef; - - type Calculation: enum { - ## Sums the values given. For string values, - ## this will be the number of strings given. - SUM, - ## Find the minimum value. - MIN, - ## Find the maximum value. - MAX, - ## Find the variance of the values. - VARIANCE, - ## Find the standard deviation of the values. - STD_DEV, - ## Calculate the average of the values. - AVG, - ## Calculate the number of unique values. - UNIQUE, - }; - - ## Represents a thing which is having metrics collected for it. An instance - ## of this record type and an id together represent a single measurement. - type Index: record { - ## A non-address related metric or a sub-key for an address based metric. - ## An example might be successful SSH connections by client IP address - ## where the client string would be the index value. - ## Another example might be number of HTTP requests to a particular - ## value in a Host header. This is an example of a non-host based - ## metric since multiple IP addresses could respond for the same Host - ## header value. - str: string &optional; - - ## Host is the value to which this metric applies. - host: addr &optional; - - ## The CIDR block that this metric applies to. This is typically - ## only used internally for host based aggregation. - network: subnet &optional; - } &log; - - ## Represents data being added for a single metric data point. - ## Only supply a single value here at a time. - type DataPoint: record { - ## Count value. - num: count &optional; - ## Double value. - dbl: double &optional; - ## String value. - str: string &optional; - }; - - ## Value supplied when a metric is finished. It contains all - ## of the measurements collected for the metric. - type ResultVal: record { - ## The time when this result was first started. - begin: time &log; - - ## The time when the last value was added to this result. - end: time &log; - - ## The number of measurements received. 
- num: count &log &default=0; - - ## For numeric data, this tracks the sum of all values. - sum: double &log &optional; - - ## For numeric data, this tracks the minimum value given. - min: double &log &optional; - - ## For numeric data, this tracks the maximum value given. - max: double &log &optional; - - ## For numeric data, this calculates the average of all values. - avg: double &log &optional; - - ## For numeric data, this calculates the variance. - variance: double &log &optional; - - ## For numeric data, this calculates the standard deviation. - std_dev: double &log &optional; - - ## If cardinality is being tracked, the number of unique - ## items is tracked here. - unique: count &log &optional; - - ## A sample of something being measured. This is helpful in - ## some cases for collecting information to do further detection - ## or better logging for forensic purposes. - samples: vector of string &optional; - }; - - ## The record type that is used for logging metrics. - type Info: record { - ## Timestamp at which the metric was "broken". - ts: time &log; - ## Interval between logging of this filter and the last time it was logged. - ts_delta: interval &log; - ## What measurement the metric represents. - metric: string &log; - ## What the metric value applies to. - index: Index &log; - ## The simple numeric value of the metric. - result: ResultVal &log; - }; - - ## Type to store a table of metrics result values. - type MetricTable: table[Index] of ResultVal; - - ## Filters define how the data from a metric is aggregated and handled. - ## Filters can be used to set how often the measurements are cut - ## and logged or how the data within them is aggregated. It's also - ## possible to disable logging and use filters solely for thresholding. - type Filter: record { - ## A name for the filter in case multiple filters are being - ## applied to the same metric. In most cases the default - ## filter name is fine and this field does not need to be set. 
- name: string &default="default"; - - ## The interval at which this filter should be "broken" and written - ## to the logging stream. The counters are also reset to zero at - ## this time so any threshold based detection needs to be set to a - ## number that should be expected to happen within this period. - every: interval; - - ## The measurements to perform on the data. - measure: set[Calculation] &optional; - - ## A predicate so that you can decide per index if you would like - ## to accept the data being inserted. - pred: function(index: Metrics::Index, data: Metrics::DataPoint): bool &optional; - - ## A function to normalize the index. This can be used to aggregate or - ## normalize the entire index. - normalize_func: function(index: Metrics::Index): Index &optional; - - ## Global mask by to aggregate traffic measuring an attribute of hosts. - ## This is a special case of the normalize_func. - aggregation_mask: count &optional; - - ## Optionally provide a function to calculate a value from the ResultVal - ## structure which will be used for thresholding. If no function is - ## provided, then in the following order of preference either the - ## $unique or the $sum fields will be used. - threshold_val_func: function(val: Metrics::ResultVal): count &optional; - - ## A direct threshold for calling the $threshold_crossed function when - ## the SUM is greater than or equal to this value. - threshold: count &optional; - - ## A series of thresholds for calling the $threshold_crossed function. - threshold_series: vector of count &optional; - - ## A predicate so that you can decide when to flexibly declare when - ## a threshold crossed, and do extra work. - threshold_func: function(index: Metrics::Index, val: Metrics::ResultVal): bool &optional; - - ## A callback with the full collection of ResultVals for this filter. 
This - ## is defined as a redef because the function includes a :bro:type:`Filter` - ## record which is self referential before the Filter type has been fully - ## defined and doesn't work. - period_finished: function(ts: time, metric_name: string, filter_name: string, data: Metrics::MetricTable) &optional; - - ## A callback that is called when a threshold is crossed. - threshold_crossed: function(index: Metrics::Index, val: Metrics::ResultVal) &optional; - - ## A rollup to register this filter with. - rollup: string &optional; - - ## A number of sample DataPoint strings to collect for the threshold - ## crossing callback. - samples: count &optional; - }; - - ## Function to associate a metric filter with a metric ID. - ## - ## id: The metric ID that the filter should be associated with. - ## - ## filter: The record representing the filter configuration. - global add_filter: function(id: string, filter: Metrics::Filter); - - ## Add data into a metric. This should be called when - ## a script has measured some point value and is ready to increment the - ## counters. - ## - ## id: The metric identifier that the data represents. - ## - ## index: The metric index that the value is to be added to. - ## - ## increment: How much to increment the counter by. - global add_data: function(id: string, index: Metrics::Index, data: Metrics::DataPoint); - - ## The callback definition for rollup functions. - type RollupCallback: function(index: Metrics::Index, vals: table[string, string] of Metrics::ResultVal); - - ## Add a rollup function for merging multiple filters with matching - ## indexes. If the metrics filters being merged don't have equivalent times - ## in the $every field, an error will be generated. - ## - ## name: An arbitrary name for this filter rollup. - ## - ## vals: Each ResultVal record indexed by the appropriate metric name and filter name. 
- global create_index_rollup: function(name: string, rollup: RollupCallback); - - ## Helper function to represent a :bro:type:`Metrics::Index` value as - ## a simple string. - ## - ## index: The metric index that is to be converted into a string. - ## - ## Returns: A string reprentation of the metric index. - global index2str: function(index: Metrics::Index): string; - - ## A helper function to use with the `period_finished` field in filters. Using - ## this function is not recommended however since each metric likely has - ## different data and different semantics which would be better served by writing - ## a custom function that logs in more domain specific fashion. - global write_log: function(ts: time, metric_name: string, filter_name: string, data: Metrics::MetricTable); - - ## Event to access metrics records as they are passed to the logging framework. - global log_metrics: event(rec: Metrics::Info); - -} - -redef record Filter += { - # Internal use only. The metric that this filter applies to. The value is automatically set. - id: string &optional; -}; - -redef record ResultVal += { - # Internal use only. Used for incrementally calculating variance. - prev_avg: double &optional; - - # Internal use only. For calculating variance. - var_s: double &optional; - - # Internal use only. This is not meant to be publically available - # because we don't want to trust that we can inspect the values - # since we will like move to a probalistic data structure in the future. - # TODO: in the future this will optionally be a hyperloglog structure - unique_vals: set[DataPoint] &optional; - - # Internal use only. This is the queue where samples - # are maintained since the queue is self managing for - # the number of samples requested. - sample_queue: Queue::Queue &optional; - - # Internal use only. Indicates if a simple threshold was already crossed. - is_threshold_crossed: bool &default=F; - - # Internal use only. Current index for threshold series. 
- threshold_series_index: count &default=0; -}; - -# Store the filters indexed on the metric identifier. -global metric_filters: table[string] of vector of Filter = table(); - -# Store the filters indexed on the metric identifier and filter name. -global filter_store: table[string, string] of Filter = table(); - -# This is indexed by metric id and filter name. -global store: table[string, string] of MetricTable = table(); - -# This is a hook for watching thresholds being crossed. It is called whenever -# index values are updated and the new val is given as the `val` argument. -# It's only prototyped here because cluster and non-cluster have separate -# implementations. -global data_added: function(filter: Filter, index: Index, val: ResultVal); - -type Rollup: record { - callback: RollupCallback; - filters: set[Filter] &optional; -}; -global rollups: table[string] of Rollup; -global rollup_store: table[Index] of table[string, string] of ResultVal = {}; - - -## Event that is used to "finish" metrics and adapt the metrics -## framework for clustered or non-clustered usage. -global finish_period: event(filter: Metrics::Filter); - -event bro_init() &priority=5 - { - Log::create_stream(Metrics::LOG, [$columns=Info, $ev=log_metrics]); - } - -function index2str(index: Index): string - { - local out = ""; - if ( index?$host ) - out = fmt("%shost=%s", out, index$host); - if ( index?$network ) - out = fmt("%s%snetwork=%s", out, |out|==0 ? "" : ", ", index$network); - if ( index?$str ) - out = fmt("%s%sstr=%s", out, |out|==0 ? "" : ", ", index$str); - return fmt("metric_index(%s)", out); - } - -function do_calculated_fields(val: ResultVal) - { - if ( val?$unique_vals ) - val$unique = |val$unique_vals|; - if ( val?$var_s ) - val$variance = (val$num > 1) ? 
val$var_s/val$num : 0.0; - if ( val?$variance ) - val$std_dev = sqrt(val$variance); - } - -function merge_result_vals(rv1: ResultVal, rv2: ResultVal): ResultVal - { - local result: ResultVal; - - # Merge $begin (take the earliest one) - result$begin = (rv1$begin < rv2$begin) ? rv1$begin : rv2$begin; - - # Merge $end (take the latest one) - result$end = (rv1$end > rv2$end) ? rv1$end : rv2$end; - - # Merge $num - result$num = rv1$num + rv2$num; - - # Merge $sum - if ( rv1?$sum || rv2?$sum ) - { - result$sum = rv1?$sum ? rv1$sum : 0; - if ( rv2?$sum ) - result$sum += rv2$sum; - } - - # Merge $max - if ( rv1?$max && rv2?$max ) - result$max = (rv1$max > rv2$max) ? rv1$max : rv2$max; - else if ( rv1?$max ) - result$max = rv1$max; - else if ( rv2?$max ) - result$max = rv2$max; - - # Merge $min - if ( rv1?$min && rv2?$min ) - result$min = (rv1$min < rv2$min) ? rv1$min : rv2$min; - else if ( rv1?$min ) - result$min = rv1$min; - else if ( rv2?$min ) - result$min = rv2$min; - - # Merge $avg - if ( rv1?$avg && rv2?$avg ) - result$avg = ((rv1$avg*rv1$num) + (rv2$avg*rv2$num))/(rv1$num+rv2$num); - else if ( rv1?$avg ) - result$avg = rv1$avg; - else if ( rv2?$avg ) - result$avg = rv2$avg; - - # Merge $prev_avg - if ( rv1?$prev_avg && rv2?$prev_avg ) - result$prev_avg = ((rv1$prev_avg*rv1$num) + (rv2$prev_avg*rv2$num))/(rv1$num+rv2$num); - else if ( rv1?$prev_avg ) - result$prev_avg = rv1$prev_avg; - else if ( rv2?$prev_avg ) - result$prev_avg = rv2$prev_avg; - - # Merge $var_s - if ( rv1?$var_s && rv2?$var_s ) - { - local rv1_avg_sq = (rv1$avg - result$avg); - rv1_avg_sq = rv1_avg_sq*rv1_avg_sq; - local rv2_avg_sq = (rv2$avg - result$avg); - rv2_avg_sq = rv2_avg_sq*rv2_avg_sq; - result$var_s = rv1$num*(rv1$var_s/rv1$num + rv1_avg_sq) + rv2$num*(rv2$var_s/rv2$num + rv2_avg_sq); - } - else if ( rv1?$var_s ) - result$var_s = rv1$var_s; - else if ( rv2?$var_s ) - result$var_s = rv2$var_s; - - # Merge $unique_vals - if ( rv1?$unique_vals || rv2?$unique_vals ) - { - if ( 
rv1?$unique_vals ) - result$unique_vals = rv1$unique_vals; - - if ( rv2?$unique_vals ) - if ( ! result?$unique_vals ) - result$unique_vals = rv2$unique_vals; - else - for ( val2 in rv2$unique_vals ) - add result$unique_vals[val2]; - } - - # Merge $sample_queue - if ( rv1?$sample_queue && rv2?$sample_queue ) - result$sample_queue = Queue::merge(rv1$sample_queue, rv2$sample_queue); - else if ( rv1?$sample_queue ) - result$sample_queue = rv1$sample_queue; - else if ( rv2?$sample_queue ) - result$sample_queue = rv2$sample_queue; - - # Merge $threshold_series_index - result$threshold_series_index = (rv1$threshold_series_index > rv2$threshold_series_index) ? rv1$threshold_series_index : rv2$threshold_series_index; - - # Merge $is_threshold_crossed - if ( rv1$is_threshold_crossed || rv2$is_threshold_crossed ) - result$is_threshold_crossed = T; - - do_calculated_fields(result); - return result; - } - -function write_log(ts: time, metric_name: string, filter_name: string, data: Metrics::MetricTable) - { - local filter = filter_store[metric_name, filter_name]; - for ( index in data ) - { - local m: Info = [$ts=ts, - $ts_delta=filter$every, - $metric=filter$id, - $index=index, - $result=data[index]]; - Log::write(LOG, m); - } - } - -function reset(filter: Filter) - { - if ( [filter$id, filter$name] in store ) - delete store[filter$id, filter$name]; - - store[filter$id, filter$name] = table(); - } - -function add_filter(id: string, filter: Filter) - { - if ( filter?$normalize_func && filter?$aggregation_mask ) - { - Reporter::warning(fmt("invalid Metric filter (%s): Defined both $normalize_func and $aggregation_mask.", filter$name)); - return; - } - if ( [id, filter$name] in store ) - { - Reporter::warning(fmt("invalid Metric filter (%s): Filter with same name already exists.", filter$name)); - return; - } - if ( filter?$rollup ) - { - if ( filter$rollup !in rollups ) - { - Reporter::warning(fmt("invalid Metric filter (%s): %s rollup doesn't exist.", filter$name, 
filter$rollup)); - return; - } - else - { - local every_field = 0secs; - for ( filt in rollups ) - { - if ( [id, filt] !in filter_store ) - next; - - if ( every_field == 0secs ) - every_field = filter_store[id, filt]$every; - else if ( every_field == filter_store[id, filt]$every ) - { - Reporter::warning(fmt("invalid Metric rollup for %s: Filters with differing $every fields applied to %s.", filter$name, filter$rollup)); - return; - } - } - } - add rollups[filter$rollup]$filters[filter]; - } - - if ( ! filter?$id ) - filter$id = id; - - if ( id !in metric_filters ) - metric_filters[id] = vector(); - metric_filters[id][|metric_filters[id]|] = filter; - - filter_store[id, filter$name] = filter; - store[id, filter$name] = table(); - - schedule filter$every { Metrics::finish_period(filter) }; - } - -function add_data(id: string, index: Index, data: DataPoint) - { - if ( id !in metric_filters ) - return; - - local filters = metric_filters[id]; - - # Try to add the data to all of the defined filters for the metric. - for ( filter_id in filters ) - { - local filter = filters[filter_id]; - - # If this filter has a predicate, run the predicate and skip this - # index if the predicate return false. - if ( filter?$pred && ! filter$pred(index, data) ) - next; - - if ( filter?$normalize_func ) - index = filter$normalize_func(copy(index)); - - if ( index?$host && filter?$aggregation_mask ) - { - index$network = mask_addr(index$host, filter$aggregation_mask); - delete index$host; - } - - local metric_tbl = store[id, filter$name]; - if ( index !in metric_tbl ) - metric_tbl[index] = [$begin=network_time(), $end=network_time()]; - - local result = metric_tbl[index]; - - # If a string was given, fall back to 1.0 as the value. - local val = 1.0; - if ( data?$num || data?$dbl ) - val = data?$dbl ? data$dbl : data$num; - - ++result$num; - # Continually update the $end field. - result$end=network_time(); - - if ( filter?$samples && filter$samples > 0 && data?$str ) - { - if ( ! 
result?$sample_queue ) - result$sample_queue = Queue::init([$max_len=filter$samples]); - Queue::push(result$sample_queue, data$str); - } - - if ( SUM in filter$measure ) - { - if ( ! result?$sum ) result$sum = 0; - result$sum += val; - } - - if ( MIN in filter$measure ) - { - if ( ! result?$min ) - result$min = val; - else if ( val < result$min ) - result$min = val; - } - - if ( MAX in filter$measure ) - { - if ( ! result?$max ) - result$max = val; - else if ( val > result$max ) - result$max = val; - } - - if ( AVG in filter$measure || VARIANCE in filter$measure ) - { - if ( ! result?$avg ) - { - result$avg = val; - result$prev_avg = val; - } - else - { - result$prev_avg = result$avg; - result$avg += (val - result$avg) / result$num; - } - } - - if ( VARIANCE in filter$measure ) - { - if ( ! result?$var_s ) result$var_s = 0.0; - result$var_s += (val - result$prev_avg)*(val - result$avg); - } - - #if ( STD_DEV in filter$measure ) - # { - # #if ( result?$variance ) - # # result$std_dev = sqrt(result$variance); - # } - - if ( UNIQUE in filter$measure ) - { - if ( ! result?$unique_vals ) result$unique_vals=set(); - add result$unique_vals[data]; - } - - do_calculated_fields(result); - data_added(filter, index, result); - } - } - -# This function checks if a threshold has been crossed. It is also used as a method to implement -# mid-break-interval threshold crossing detection for cluster deployments. -function check_thresholds(filter: Filter, index: Index, val: ResultVal, modify_pct: double): bool - { - local watch = 0.0; - if ( val?$unique ) - watch = val$unique; - else if ( val?$sum ) - watch = val$sum; - - if ( filter?$threshold_val_func ) - watch = filter$threshold_val_func(val); - - if ( modify_pct < 1.0 && modify_pct > 0.0 ) - watch = watch/modify_pct; - - if ( ! val$is_threshold_crossed && - filter?$threshold && watch >= filter$threshold ) - { - # A default threshold was given and the value crossed it. 
- return T; - } - - if ( filter?$threshold_series && - |filter$threshold_series| >= val$threshold_series_index && - watch >= filter$threshold_series[val$threshold_series_index] ) - { - # A threshold series was given and the value crossed the next - # value in the series. - return T; - } - - if ( ! val$is_threshold_crossed && - filter?$threshold_func && - filter$threshold_func(index, val) ) - { - # The threshold function indicated it was crossed. - return T; - } - - return F; - } - -function threshold_crossed(filter: Filter, index: Index, val: ResultVal) - { - if ( ! filter?$threshold_crossed ) - return; - - if ( val?$sample_queue ) - val$samples = Queue::get_str_vector(val$sample_queue); - - filter$threshold_crossed(index, val); - val$is_threshold_crossed = T; - - # Bump up to the next threshold series index if a threshold series is being used. - if ( filter?$threshold_series ) - ++val$threshold_series_index; - } - -function create_index_rollup(name: string, rollup: RollupCallback) - { - local r: Rollup = [$callback=rollup]; - r$filters=set(); - rollups[name] = r; - } diff --git a/scripts/base/frameworks/metrics/non-cluster.bro b/scripts/base/frameworks/metrics/non-cluster.bro deleted file mode 100644 index b76ca3ea48..0000000000 --- a/scripts/base/frameworks/metrics/non-cluster.bro +++ /dev/null @@ -1,37 +0,0 @@ -@load ./main - -module Metrics; - -event Metrics::finish_period(filter: Filter) - { - local data = store[filter$id, filter$name]; - if ( filter?$rollup ) - { - for ( index in data ) - { - if ( index !in rollup_store ) - rollup_store[index] = table(); - rollup_store[index][filter$id, filter$name] = data[index]; - - # If all of the result vals are stored then the rollup callback can be executed. 
- if ( |rollup_store[index]| == |rollups[filter$rollup]$filters| ) - { - rollups[filter$rollup]$callback(index, rollup_store[index]); - } - } - } - - if ( filter?$period_finished ) - filter$period_finished(network_time(), filter$id, filter$name, data); - - reset(filter); - - schedule filter$every { Metrics::finish_period(filter) }; - } - - -function data_added(filter: Filter, index: Index, val: ResultVal) - { - if ( check_thresholds(filter, index, val, 1.0) ) - threshold_crossed(filter, index, val); - } diff --git a/scripts/base/init-default.bro b/scripts/base/init-default.bro index 35e78531fc..bb64accaea 100644 --- a/scripts/base/init-default.bro +++ b/scripts/base/init-default.bro @@ -29,7 +29,7 @@ @load base/frameworks/communication @load base/frameworks/control @load base/frameworks/cluster -@load base/frameworks/metrics +@load base/frameworks/measurement @load base/frameworks/intel @load base/frameworks/reporter @load base/frameworks/tunnels diff --git a/scripts/base/protocols/ssh/main.bro b/scripts/base/protocols/ssh/main.bro index cd20f4e913..d782c4fa37 100644 --- a/scripts/base/protocols/ssh/main.bro +++ b/scripts/base/protocols/ssh/main.bro @@ -17,12 +17,6 @@ export { ## The SSH protocol logging stream identifier. redef enum Log::ID += { LOG }; - redef enum Notice::Type += { - ## Indicates that a heuristically detected "successful" SSH - ## authentication occurred. - Login - }; - type Info: record { ## Time when the SSH connection began. ts: time &log; @@ -30,9 +24,9 @@ export { uid: string &log; ## The connection's 4-tuple of endpoint addresses/ports. id: conn_id &log; - ## Indicates if the login was heuristically guessed to be "success" - ## or "failure". - status: string &log &optional; + ## Indicates if the login was heuristically guessed to be "success", + ## "failure", or "undetermined". + status: string &log &default="undetermined"; ## Direction of the connection. 
If the client was a local host ## logging into an external host, this would be OUTBOUND. INBOUND ## would be set for the opposite situation. @@ -54,12 +48,12 @@ export { ## The size in bytes of data sent by the server at which the SSH ## connection is presumed to be successful. - const authentication_data_size = 5500 &redef; + const authentication_data_size = 4000 &redef; ## If true, we tell the event engine to not look at further data ## packets after the initial SSH handshake. Helps with performance ## (especially with large file transfers) but precludes some - ## kinds of analyses (e.g., tracking connection size). + ## kinds of analyses. const skip_processing_after_detection = F &redef; ## Event that is generated when the heuristic thinks that a login @@ -104,54 +98,60 @@ function set_session(c: connection) function check_ssh_connection(c: connection, done: bool) { - # If done watching this connection, just return. + # If already done watching this connection, just return. if ( c$ssh$done ) return; - # Make sure conn_size_analyzer is active by checking - # resp$num_bytes_ip. In general it should always be active though. - if ( ! c$resp?$num_bytes_ip ) - return; - - # Remove the IP and TCP header length from the total size. - # TODO: Fix for IPv6. This whole approach also seems to break in some - # cases where there are more header bytes than num_bytes_ip. - local header_bytes = c$resp$num_pkts*32 + c$resp$num_pkts*20; - local server_bytes = c$resp$num_bytes_ip; - if ( server_bytes >= header_bytes ) - server_bytes = server_bytes - header_bytes; - else - server_bytes = c$resp$size; - - # If this is still a live connection and the byte count has not crossed - # the threshold, just return and let the rescheduled check happen later. - if ( ! done && server_bytes < authentication_data_size ) - return; - - # Make sure the server has sent back more than 50 bytes to filter out - # hosts that are just port scanning. 
Nothing is ever logged if the server - # doesn't send back at least 50 bytes. - if ( server_bytes < 50 ) - return; - - c$ssh$direction = Site::is_local_addr(c$id$orig_h) ? OUTBOUND : INBOUND; - c$ssh$resp_size = server_bytes; - - if ( server_bytes < authentication_data_size ) + if ( done ) { - c$ssh$status = "failure"; - event SSH::heuristic_failed_login(c); + # If this connection is done, then we can look to see if + # this matches the conditions for a failed login. Failed + # logins are only detected at connection state removal. + + if ( # Require originators to have sent at least 50 bytes. + c$orig$size > 50 && + # Responders must be below 4000 bytes. + c$resp$size < 4000 && + # Responder must have sent fewer than 40 packets. + c$resp$num_pkts < 40 && + # If there was a content gap we can't reliably do this heuristic. + c$conn$missed_bytes == 0)# && + # Only "normal" connections can count. + #c$conn?$conn_state && c$conn$conn_state in valid_states ) + { + c$ssh$status = "failure"; + event SSH::heuristic_failed_login(c); + } + + if ( c$resp$size > authentication_data_size ) + { + c$ssh$status = "success"; + event SSH::heuristic_successful_login(c); + } } else - { - # presumed successful login - c$ssh$status = "success"; - event SSH::heuristic_successful_login(c); + { + # If this connection is still being tracked, then it's possible + # to watch for it to be a successful connection. + if ( c$resp$size > authentication_data_size ) + { + c$ssh$status = "success"; + event SSH::heuristic_successful_login(c); + } + else + # This connection must be tracked longer. Let the scheduled + # check happen again. + return; } + + # Set the direction for the log. + c$ssh$direction = Site::is_local_addr(c$id$orig_h) ? OUTBOUND : INBOUND; # Set the "done" flag to prevent the watching event from rescheduling # after detection is done. 
c$ssh$done=T; + + Log::write(SSH::LOG, c$ssh); if ( skip_processing_after_detection ) { @@ -161,18 +161,6 @@ function check_ssh_connection(c: connection, done: bool) } } -event SSH::heuristic_successful_login(c: connection) &priority=-5 - { - NOTICE([$note=Login, - $msg="Heuristically detected successful SSH login.", - $conn=c]); - - Log::write(SSH::LOG, c$ssh); - } -event SSH::heuristic_failed_login(c: connection) &priority=-5 - { - Log::write(SSH::LOG, c$ssh); - } event connection_state_remove(c: connection) &priority=-5 { diff --git a/scripts/policy/frameworks/metrics/conn-example.bro b/scripts/policy/frameworks/metrics/conn-example.bro index 1271d6eb32..3f87ecb283 100644 --- a/scripts/policy/frameworks/metrics/conn-example.bro +++ b/scripts/policy/frameworks/metrics/conn-example.bro @@ -1,7 +1,7 @@ ##! An example of using the metrics framework to collect connection metrics ##! aggregated into /24 CIDR ranges. -@load base/frameworks/metrics +@load base/frameworks/measurement @load base/utils/site event bro_init() diff --git a/scripts/policy/frameworks/metrics/http-example.bro b/scripts/policy/frameworks/metrics/http-example.bro index b3284580e8..d7aa304754 100644 --- a/scripts/policy/frameworks/metrics/http-example.bro +++ b/scripts/policy/frameworks/metrics/http-example.bro @@ -2,7 +2,7 @@ ##! only local networks. Additionally, the status code for the response from ##! the request is added into the metric. -@load base/frameworks/metrics +@load base/frameworks/measurement @load base/protocols/http @load base/utils/site diff --git a/scripts/policy/frameworks/metrics/ssl-example.bro b/scripts/policy/frameworks/metrics/ssl-example.bro index 3b9b848edb..400373c06c 100644 --- a/scripts/policy/frameworks/metrics/ssl-example.bro +++ b/scripts/policy/frameworks/metrics/ssl-example.bro @@ -3,7 +3,7 @@ ##! establishments. Names ending in google.com are being filtered out as an ##! example of the predicate based filtering in metrics filters. 
-@load base/frameworks/metrics +@load base/frameworks/measurement @load base/protocols/ssl event bro_init() diff --git a/scripts/policy/misc/app-metrics.bro b/scripts/policy/misc/app-metrics.bro index 0a4fc8b39f..68deddaa29 100644 --- a/scripts/policy/misc/app-metrics.bro +++ b/scripts/policy/misc/app-metrics.bro @@ -1,8 +1,8 @@ @load base/protocols/http @load base/protocols/ssl -@load base/frameworks/metrics +@load base/frameworks/measurement -module AppMetrics; +module AppMeasurement; export { redef enum Log::ID += { LOG }; @@ -19,7 +19,11 @@ export { const break_interval = 15mins &redef; } -function app_metrics_rollup(index: Metrics::Index, vals: table[string, string] of Metrics::ResultVal) +redef record connection += { + resp_hostname: string &optional; +}; + +function app_metrics_rollup(index: Measurement::Index, vals: table[string, string] of Measurement::ResultVal) { local l: Info; l$ts = network_time(); @@ -35,55 +39,67 @@ function app_metrics_rollup(index: Metrics::Index, vals: table[string, string] o l$uniq_hosts = val$unique; } } - Log::write(LOG, l); } event bro_init() &priority=3 { - Log::create_stream(AppMetrics::LOG, [$columns=Info]); + Log::create_stream(AppMeasurement::LOG, [$columns=Info]); - Metrics::create_index_rollup("AppMetrics", app_metrics_rollup); - Metrics::add_filter("apps.bytes", [$every=break_interval, $measure=set(Metrics::SUM), $rollup="AppMetrics"]); - Metrics::add_filter("apps.hits", [$every=break_interval, $measure=set(Metrics::UNIQUE), $rollup="AppMetrics"]); + #Measurement::create_index_rollup("AppMeasurement", app_metrics_rollup); + #Measurement::add_filter("apps.bytes", [$every=break_interval, $measure=set(Measurement::SUM), $rollup="AppMeasurement"]); + #Measurement::add_filter("apps.hits", [$every=break_interval, $measure=set(Measurement::UNIQUE), $rollup="AppMeasurement"]); + + Measurement::create([$epoch=break_interval, + $measurements=table(["apps.bytes"] = [$apply=set(Measurement::SUM)], + ["apps.hits"] = 
[$apply=set(Measurement::UNIQUE)]), + $period_finished(result: Measurement::Results) = + { + local l: Info; + l$ts = network_time(); + for ( index in result ) + { + l$bytes = double_to_count(floor(result[index]["apps.bytes"]$sum)); + l$hits = result[index]["apps.hits"]$num; + l$uniq_hosts = result[index]["apps.hits"]$unique; + Log::write(LOG, l); + } + }]); } function do_metric(id: conn_id, hostname: string, size: count) { - if ( /youtube\.com$/ in hostname && size > 512*1024 ) + if ( /\.youtube\.com$/ in hostname && size > 512*1024 ) { - Metrics::add_data("apps.bytes", [$str="youtube"], [$num=size]); - Metrics::add_data("apps.hits", [$str="youtube"], [$str=cat(id$orig_h)]); + Measurement::add_data("apps.bytes", [$str="youtube"], [$num=size]); + Measurement::add_data("apps.hits", [$str="youtube"], [$str=cat(id$orig_h)]); } else if ( /(\.facebook\.com|\.fbcdn\.net)$/ in hostname && size > 20 ) { - Metrics::add_data("apps.bytes", [$str="facebook"], [$num=size]); - Metrics::add_data("apps.hits", [$str="facebook"], [$str=cat(id$orig_h)]); + Measurement::add_data("apps.bytes", [$str="facebook"], [$num=size]); + Measurement::add_data("apps.hits", [$str="facebook"], [$str=cat(id$orig_h)]); } else if ( /\.google\.com$/ in hostname && size > 20 ) { - Metrics::add_data("apps.bytes", [$str="google"], [$num=size]); - Metrics::add_data("apps.hits", [$str="google"], [$str=cat(id$orig_h)]); + Measurement::add_data("apps.bytes", [$str="google"], [$num=size]); + Measurement::add_data("apps.hits", [$str="google"], [$str=cat(id$orig_h)]); } - else if ( /nflximg\.com$/ in hostname && size > 200*1024 ) + else if ( /\.nflximg\.com$/ in hostname && size > 200*1024 ) { - Metrics::add_data("apps.bytes", [$str="netflix"], [$num=size]); - Metrics::add_data("apps.hits", [$str="netflix"], [$str=cat(id$orig_h)]); + Measurement::add_data("apps.bytes", [$str="netflix"], [$num=size]); + Measurement::add_data("apps.hits", [$str="netflix"], [$str=cat(id$orig_h)]); } else if ( 
/\.(pandora|p-cdn)\.com$/ in hostname && size > 512*1024 ) { - Metrics::add_data("apps.bytes", [$str="pandora"], [$num=size]); - Metrics::add_data("apps.hits", [$str="pandora"], [$str=cat(id$orig_h)]); + Measurement::add_data("apps.bytes", [$str="pandora"], [$num=size]); + Measurement::add_data("apps.hits", [$str="pandora"], [$str=cat(id$orig_h)]); } - else if ( /gmail\.com$/ in hostname && size > 20 ) + else if ( /\.gmail\.com$/ in hostname && size > 20 ) { - Metrics::add_data("apps.bytes", [$str="gmail"], [$num=size]); - Metrics::add_data("apps.hits", [$str="gmail"], [$str=cat(id$orig_h)]); + Measurement::add_data("apps.bytes", [$str="gmail"], [$num=size]); + Measurement::add_data("apps.hits", [$str="gmail"], [$str=cat(id$orig_h)]); } } -redef record connection += { - resp_hostname: string &optional; -}; event ssl_established(c: connection) { diff --git a/scripts/policy/misc/detect-traceroute/main.bro b/scripts/policy/misc/detect-traceroute/main.bro index e62d370e45..7656ed8d03 100644 --- a/scripts/policy/misc/detect-traceroute/main.bro +++ b/scripts/policy/misc/detect-traceroute/main.bro @@ -2,7 +2,7 @@ ##! toward hosts that have sent low TTL packets. ##! It generates a notice when the number of ICMP Time Exceeded ##! messages for a source-destination pair exceeds threshold -@load base/frameworks/metrics +@load base/frameworks/measurement @load base/frameworks/signatures @load-sigs ./detect-low-ttls.sig diff --git a/scripts/policy/misc/scan.bro b/scripts/policy/misc/scan.bro index 7f5f43dbd9..570dbfe6b0 100644 --- a/scripts/policy/misc/scan.bro +++ b/scripts/policy/misc/scan.bro @@ -5,7 +5,7 @@ ##! All the authors of the old scan.bro @load base/frameworks/notice -@load base/frameworks/metrics +@load base/frameworks/measurement @load base/utils/time @@ -24,7 +24,7 @@ export { ## unique ports on a single host over the previous ## :bro:id:`port_scan_interval` time range. 
Port_Scan, - }; + }; ## Failed connection attempts are tracked over this time interval for the address ## scan detection. A higher interval will detect slower scanners, but may @@ -42,7 +42,7 @@ export { ## connections with on a single victim host. const port_scan_threshold = 15 &redef; - ## Custom threholds based on service for address scan. This is primarily + ## Custom thresholds based on service for address scan. This is primarily ## useful for setting reduced thresholds for specific ports. const addr_scan_custom_thresholds: table[port] of count &redef; @@ -74,7 +74,7 @@ function addr_scan_threshold_crossed(index: Metrics::Index, val: Metrics::Result $p=to_port(index$str), $sub=side, $msg=message, - $identifier=cat(index)]); + $identifier=cat(index$host)]); } function port_scan_threshold_crossed(index: Metrics::Index, val: Metrics::ResultVal) @@ -88,7 +88,7 @@ function port_scan_threshold_crossed(index: Metrics::Index, val: Metrics::Result $dst=to_addr(index$str), $sub=side, $msg=message, - $identifier=cat(index)]); + $identifier=cat(index$host)]); } event bro_init() &priority=5 diff --git a/scripts/policy/protocols/conn/conn-stats-per-host.bro b/scripts/policy/protocols/conn/conn-stats-per-host.bro index fad2331f44..d537d13b72 100644 --- a/scripts/policy/protocols/conn/conn-stats-per-host.bro +++ b/scripts/policy/protocols/conn/conn-stats-per-host.bro @@ -1,6 +1,6 @@ @load base/protocols/conn -@load base/frameworks/metrics +@load base/frameworks/measurement event bro_init() &priority=5 { diff --git a/scripts/policy/protocols/conn/metrics.bro b/scripts/policy/protocols/conn/metrics.bro index 057e23e088..62ca96ea0a 100644 --- a/scripts/policy/protocols/conn/metrics.bro +++ b/scripts/policy/protocols/conn/metrics.bro @@ -1,4 +1,4 @@ -@load base/frameworks/metrics +@load base/frameworks/measurement @load base/utils/site event bro_init() &priority=3 diff --git a/scripts/policy/protocols/ftp/detect-bruteforcing.bro 
b/scripts/policy/protocols/ftp/detect-bruteforcing.bro index 11d6ec71a1..59c8525c7e 100644 --- a/scripts/policy/protocols/ftp/detect-bruteforcing.bro +++ b/scripts/policy/protocols/ftp/detect-bruteforcing.bro @@ -1,6 +1,6 @@ @load base/protocols/ftp -@load base/frameworks/metrics +@load base/frameworks/measurement @load base/utils/time @@ -19,7 +19,7 @@ export { ## The time period in which the threshold needs to be crossed before ## being reset. - const bruteforce_measurement_interval = 15mins; + const bruteforce_measurement_interval = 15mins &redef; } @@ -32,7 +32,8 @@ event bro_init() $threshold_crossed(index: Metrics::Index, val: Metrics::ResultVal) = { local dur = duration_to_mins_secs(val$end-val$begin); - local message = fmt("%s had %d failed logins on %d FTP servers in %s", index$host, val$num, val$unique, dur); + local plural = val$unique>1 ? "s" : ""; + local message = fmt("%s had %d failed logins on %d FTP server%s in %s", index$host, val$num, val$unique, plural, dur); NOTICE([$note=FTP::Bruteforcing, $src=index$host, $msg=message, diff --git a/scripts/policy/protocols/http/detect-sqli.bro b/scripts/policy/protocols/http/detect-sqli.bro index 21164bc126..410d3fde31 100644 --- a/scripts/policy/protocols/http/detect-sqli.bro +++ b/scripts/policy/protocols/http/detect-sqli.bro @@ -1,7 +1,7 @@ ##! SQL injection attack detection in HTTP. @load base/frameworks/notice -@load base/frameworks/metrics +@load base/frameworks/measurement @load base/protocols/http module HTTP; diff --git a/scripts/policy/protocols/smtp/metrics.bro b/scripts/policy/protocols/smtp/metrics.bro index ac803ac621..19a3220805 100644 --- a/scripts/policy/protocols/smtp/metrics.bro +++ b/scripts/policy/protocols/smtp/metrics.bro @@ -3,7 +3,7 @@ ##! "How much mail is being sent from each local mail server per hour?" 
@load base/protocols/smtp -@load base/frameworks/metrics +@load base/frameworks/measurement @load base/utils/site @load base/utils/directions-and-hosts diff --git a/scripts/policy/protocols/ssh/detect-bruteforcing.bro b/scripts/policy/protocols/ssh/detect-bruteforcing.bro index edf6379bec..44e94eb361 100644 --- a/scripts/policy/protocols/ssh/detect-bruteforcing.bro +++ b/scripts/policy/protocols/ssh/detect-bruteforcing.bro @@ -2,7 +2,7 @@ ##! bruteforcing over SSH. @load base/protocols/ssh -@load base/frameworks/metrics +@load base/frameworks/measurement @load base/frameworks/notice @load base/frameworks/intel @@ -54,8 +54,8 @@ event bro_init() $identifier=cat(index$host)]); # Insert the guesser into the intel framework. Intel::insert([$host=index$host, - $meta=[$source="local", - $desc=fmt("Bro observed %0.f apparently failed SSH connections.", val$sum)]]); + $meta=[$source="local", + $desc=fmt("Bro observed %0.f apparently failed SSH connections.", val$sum)]]); }]); }