diff --git a/scripts/base/frameworks/metrics/cluster.bro b/scripts/base/frameworks/metrics/cluster.bro
index 6a536efb85..60342b327f 100644
--- a/scripts/base/frameworks/metrics/cluster.bro
+++ b/scripts/base/frameworks/metrics/cluster.bro
@@ -60,18 +60,18 @@ global requested_results: table[string] of time = table() &create_expire=5mins;
 
 # This variable is maintained by manager nodes as they collect and aggregate
 # results.
-global filter_results: table[string, string, string] of MetricTable &create_expire=5mins;
+global filter_results: table[string, string, string] of MetricTable &read_expire=1min;
 
 # This variable is maintained by manager nodes to track how many "dones" they
 # collected per collection unique id.  Once the number of results for a uid
 # matches the number of peer nodes that results should be coming from, the
 # result is written out and deleted from here.
 # TODO: add an &expire_func in case not all results are received.
-global done_with: table[string] of count &create_expire=5mins &default=0;
+global done_with: table[string] of count &read_expire=1min &default=0;
 
 # This variable is maintained by managers to track intermediate responses as
 # they are getting a global view for a certain index.
-global index_requests: table[string, string, string, Index] of ResultVal &create_expire=5mins &default=[];
+global index_requests: table[string, string, string, Index] of ResultVal &read_expire=1min;
 
 # This variable is maintained by all hosts for different purposes. Non-managers
 # maintain it to know what indexes they have recently sent as intermediate
@@ -163,7 +163,7 @@ event Metrics::cluster_index_request(uid: string, id: string, filter_name: strin
 @if ( Cluster::local_node_type() == Cluster::MANAGER )
 
 # Manager's handle logging.
-event Metrics::log_it(filter: Filter)
+event Metrics::finish_period(filter: Filter)
 	{
 	#print fmt("%.6f MANAGER: breaking %s filter for %s metric", network_time(), filter$name, filter$id);
 	local uid = unique_id("");
@@ -174,8 +174,8 @@ event Metrics::log_it(filter: Filter)
 	# Request data from peers.
 	event Metrics::cluster_filter_request(uid, filter$id, filter$name);
 
-	# Schedule the log_it event for the next break period.
-	schedule filter$every { Metrics::log_it(filter) };
+	# Schedule the next finish_period event.
+	schedule filter$every { Metrics::finish_period(filter) };
 	}
 
 # This is unlikely to be called often, but it's here in case there are metrics
@@ -237,6 +237,8 @@ event Metrics::cluster_filter_response(uid: string, id: string, filter_name: str
 	++done_with[uid];
 
 	local local_data = filter_results[uid, id, filter_name];
+	local filter = filter_store[id, filter_name];
+
 	for ( index in data )
 		{
 		if ( index in local_data )
@@ -245,18 +247,18 @@ event Metrics::cluster_filter_response(uid: string, id: string, filter_name: str
 			local_data[index] = data[index];
 
 		# If a filter is done being collected, thresholds for each index
-		# need to checked so we're doing it here to avoid doubly iterating
+		# need to be checked so we're doing it here to avoid doubly iterating
 		# over each index.
 		if ( Cluster::worker_count == done_with[uid] )
 			{
-			if ( check_thresholds(filter_store[id, filter_name], index, local_data[index], 1.0) )
+			if ( check_thresholds(filter, index, local_data[index], 1.0) )
 				{
-				threshold_crossed(filter_store[id, filter_name], index, local_data[index]);
+				threshold_crossed(filter, index, local_data[index]);
 				}
 			}
 		}
 
-	# If the data has been collected from all peers, we are done and ready to log.
+	# If the data has been collected from all peers, we are done and ready to finish.
 	if ( Cluster::worker_count == done_with[uid] )
 		{
 		local ts = network_time();
@@ -267,11 +269,30 @@ event Metrics::cluster_filter_response(uid: string, id: string, filter_name: str
 			delete requested_results[uid];
 			}
 
-		write_log(ts, filter_store[id, filter_name], local_data);
-
+		if ( filter?$rollup )
+			{
+			for ( index in local_data )
+				{
+				if ( index !in rollup_store )
+					rollup_store[index] = table();
+				rollup_store[index][id, filter_name] = local_data[index];
+
+				# If all of the result vals are stored then the rollup callback can be executed.
+				if ( |rollup_store[index]| == |rollups[filter$rollup]$filters| )
+					{
+					rollups[filter$rollup]$callback(index, rollup_store[index]);
+					}
+				}
+			}
+
+		if ( filter?$period_finished )
+			filter$period_finished(ts, filter$id, filter$name, local_data);
+
 		# Clean up
 		delete filter_results[uid, id, filter_name];
 		delete done_with[uid];
+		# Not sure I need to reset the filter on the manager.
+		reset(filter);
 		}
 	}
 
diff --git a/scripts/base/frameworks/metrics/main.bro b/scripts/base/frameworks/metrics/main.bro
index 8d7ea26bc7..534529e020 100644
--- a/scripts/base/frameworks/metrics/main.bro
+++ b/scripts/base/frameworks/metrics/main.bro
@@ -8,10 +8,6 @@ export {
 	## The metrics logging stream identifier.
 	redef enum Log::ID += { LOG };
 
-	## The default interval used for "breaking" metrics and writing the
-	## current value to the logging stream.
-	const default_break_interval = 15mins &redef;
-
 	## This is the interval for how often threshold based notices will happen
 	## after they have already fired.
 	const threshold_crossed_restart_interval = 1hr &redef;
@@ -108,63 +104,74 @@ export {
 	## The record type that is used for logging metrics.
 	type Info: record {
 		## Timestamp at which the metric was "broken".
-		ts:          time     &log;
+		ts:       time      &log;
 		## Interval between logging of this filter and the last time it was logged.
-		ts_delta:    interval &log;
-		## The name of the filter being logged.  Values
-		## can have multiple filters which represent different perspectives on
-		## the data so this is necessary to understand the value.
-		filter_name: string   &log;
+		ts_delta: interval  &log;
 		## What measurement the metric represents.
-		metric:      string   &log;
+		metric:   string    &log;
 		## What the metric value applies to.
-		index:       Index    &log;
+		index:    Index     &log;
 		## The simple numeric value of the metric.
-		result:      ResultVal &log;
+		result:   ResultVal &log;
 	};
 
+	## Type to store a table of metrics result values.
+	type MetricTable: table[Index] of ResultVal;
+
 	## Filters define how the data from a metric is aggregated and handled.
 	## Filters can be used to set how often the measurements are cut
 	## and logged or how the data within them is aggregated.  It's also
 	## possible to disable logging and use filters solely for thresholding.
 	type Filter: record {
-		## The name for this filter so that multiple filters can be
-		## applied to a single metrics to get a different view of the same
-		## metric data being collected (different aggregation, break, etc).
+		## A name for the filter in case multiple filters are being
+		## applied to the same metric.  In most cases the default
+		## filter name is fine and this field does not need to be set.
		name: string &default="default";
-		## The metric that this filter applies to.
-		id: string &optional;
-		## The measurements to perform on the data.
-		measure: set[Calculation] &optional;
-		## A predicate so that you can decide per index if you would like
-		## to accept the data being inserted.
-		pred: function(index: Metrics::Index, data: Metrics::DataPoint): bool &optional;
-		## A function to normalize the index.  This can be used to aggregate or
-		## normalize the entire index.
-		normalize_func: function(index: Metrics::Index): Index &optional;
-		## Global mask by to aggregate traffic measuring an attribute of hosts.
-		## This is a special case of the normalize_func.
-		aggregation_mask: count &optional;
+
 		## The interval at which this filter should be "broken" and written
 		## to the logging stream.  The counters are also reset to zero at
 		## this time so any threshold based detection needs to be set to a
 		## number that should be expected to happen within this period.
-		every: interval &default=default_break_interval;
-		## This determines if the result of this filter is sent to the metrics
-		## logging stream.  One use for the logging framework is as an internal
-		## thresholding and statistics gathering utility that is meant to
-		## never log but rather to generate notices and derive data.
-		log: bool &default=T;
+		every: interval;
+
+		## The measurements to perform on the data.
+		measure: set[Calculation] &optional;
+
+		## A predicate so that you can decide per index if you would like
+		## to accept the data being inserted.
+		pred: function(index: Metrics::Index, data: Metrics::DataPoint): bool &optional;
+
+		## A function to normalize the index.  This can be used to aggregate or
+		## normalize the entire index.
+		normalize_func: function(index: Metrics::Index): Index &optional;
+
+		## Global mask by which to aggregate traffic when measuring an attribute of hosts.
+		## This is a special case of the normalize_func.
+		aggregation_mask: count &optional;
+
 		## A direct threshold for calling the $threshold_crossed function when
 		## the SUM is greater than or equal to this value.
 		threshold: count &optional;
+
 		## A series of thresholds for calling the $threshold_crossed function.
 		threshold_series: vector of count &optional;
+
 		## A predicate so that you can decide when to flexibly declare when
 		## a threshold crossed, and do extra work.
 		threshold_func: function(index: Metrics::Index, val: Metrics::ResultVal): bool &optional;
-		## A function callback that is called when a threshold is crossed.
+
 		## A callback that receives the full collection of ResultVals for this
 		## filter when its measurement period finishes.  It takes the metric and
 		## filter names rather than a :bro:type:`Filter` record because a
 		## Filter-typed field would make the record self-referential before the
 		## Filter type has been fully defined, which doesn't work.
+		period_finished: function(ts: time, metric_name: string, filter_name: string, data: Metrics::MetricTable) &optional;
+
+		## A callback that is called when a threshold is crossed.
 		threshold_crossed: function(index: Metrics::Index, val: Metrics::ResultVal) &optional;
+
+		## A rollup to register this filter with.
+		rollup: string &optional;
+
 		## A number of sample DataPoint strings to collect for the threshold
 		## crossing callback.
 		samples: count &optional;
@@ -187,7 +194,19 @@ export {
 	##
 	## increment: How much to increment the counter by.
 	global add_data: function(id: string, index: Metrics::Index, data: Metrics::DataPoint);
-
+
+	## The callback definition for rollup functions.
+	type RollupCallback: function(index: Metrics::Index, vals: table[string, string] of Metrics::ResultVal);
+
+	## Add a rollup function for merging multiple filters with matching
+	## indexes.  If the metrics filters being merged don't have equivalent times
+	## in the $every field, an error will be generated.
+	##
+	## name: An arbitrary name for this filter rollup.
+	##
+	## vals: Each ResultVal record indexed by the appropriate metric name and filter name.
+	global create_index_rollup: function(name: string, rollup: RollupCallback);
+
 	## Helper function to represent a :bro:type:`Metrics::Index` value as
 	## a simple string.
 	##
@@ -195,12 +214,23 @@ export {
 	##
 	## Returns: A string reprentation of the metric index.
 	global index2str: function(index: Metrics::Index): string;
-
+
+	## A helper function to use with the `period_finished` field in filters.  Using
+	## this function is generally not recommended, however, since each metric likely
+	## has different data and semantics that are better served by writing a custom
+	## function that logs in a more domain-specific fashion.
+	global write_log: function(ts: time, metric_name: string, filter_name: string, data: Metrics::MetricTable);
+
 	## Event to access metrics records as they are passed to the logging framework.
 	global log_metrics: event(rec: Metrics::Info);
 }

+redef record Filter += {
+	# The metric that this filter applies to.  The value is automatically set.
+	id: string &optional;
+};
+
 redef record ResultVal += {
 	# Internal use only.  Used for incrementally calculating variance.
 	prev_avg: double &optional;
@@ -226,9 +256,6 @@ redef record ResultVal += {
 	threshold_series_index: count &default=0;
 };
 
-# Type to store a table of metrics values.
-type MetricTable: table[Index] of ResultVal;
-
 # Store the filters indexed on the metric identifier.
 global metric_filters: table[string] of vector of Filter = table();
 
@@ -238,16 +265,23 @@ global filter_store: table[string, string] of Filter = table();
 # This is indexed by metric id and filter name.
 global store: table[string, string] of MetricTable = table() &default=table();
 
-# This is hook for watching thresholds being crossed.  It is called whenever
+# This is a hook for watching thresholds being crossed.  It is called whenever
 # index values are updated and the new val is given as the `val` argument.
-# It's only prototyped here because cluster and non-cluster has separate
+# It's only prototyped here because cluster and non-cluster have separate
 # implementations.
 global data_added: function(filter: Filter, index: Index, val: ResultVal);
 
+type Rollup: record {
+	callback: RollupCallback;
+	filters: set[Filter] &optional;
+};
+global rollups: table[string] of Rollup;
+global rollup_store: table[Index] of table[string, string] of ResultVal = {};
+
+
 ## Event that is used to "finish" metrics and adapt the metrics
 ## framework for clustered or non-clustered usage.
-global log_it: event(filter: Metrics::Filter);
-
+global finish_period: event(filter: Metrics::Filter);
 
 event bro_init() &priority=5
 	{
@@ -279,22 +313,21 @@ function do_calculated_fields(val: ResultVal)
 function merge_result_vals(rv1: ResultVal, rv2: ResultVal): ResultVal
 	{
 	local result: ResultVal;
-	
+
 	# Merge $begin (take the earliest one)
-	result$begin = rv1$begin < rv2$begin ? rv1$begin : rv2$begin;
+	result$begin = (rv1$begin < rv2$begin) ? rv1$begin : rv2$begin;
 
 	# Merge $end (take the latest one)
-	result$end = rv1$end > rv2$end ? rv1$end : rv2$end;
+	result$end = (rv1$end > rv2$end) ? rv1$end : rv2$end;
 
 	# Merge $num
 	result$num = rv1$num + rv2$num;
 
 	# Merge $sum
+	result$sum = rv1$sum + rv2$sum;
 	if ( rv1?$sum || rv2?$sum )
 		{
-		result$sum = 0;
-		if ( rv1?$sum )
-			result$sum += rv1$sum;
+		result$sum = rv1?$sum ? rv1$sum : 0;
 		if ( rv2?$sum )
 			result$sum += rv2$sum;
 		}
@@ -348,13 +381,15 @@ function merge_result_vals(rv1: ResultVal, rv2: ResultVal): ResultVal
 	# Merge $unique_vals
 	if ( rv1?$unique_vals || rv2?$unique_vals )
 		{
-		result$unique_vals = set();
 		if ( rv1?$unique_vals )
-			for ( val1 in rv1$unique_vals )
-				add result$unique_vals[val1];
+			result$unique_vals = rv1$unique_vals;
+
 		if ( rv2?$unique_vals )
-			for ( val2 in rv2$unique_vals )
-				add result$unique_vals[val2];
+			if ( ! result?$unique_vals )
+				result$unique_vals = rv2$unique_vals;
+			else
+				for ( val2 in rv2$unique_vals )
+					add result$unique_vals[val2];
 		}
 
 	# Merge $sample_queue
@@ -376,8 +411,9 @@ function merge_result_vals(rv1: ResultVal, rv2: ResultVal): ResultVal
 	return result;
 	}
 
-function write_log(ts: time, filter: Filter, data: MetricTable)
+function write_log(ts: time, metric_name: string, filter_name: string, data: Metrics::MetricTable)
 	{
+	local filter = filter_store[metric_name, filter_name];
 	for ( index in data )
 		{
 		local m: Info = [$ts=ts,
@@ -386,9 +422,7 @@ function write_log(ts: time, filter: Filter, data: MetricTable)
 		                 $filter_name=filter$name,
 		                 $index=index,
 		                 $result=data[index]];
-
-		if ( filter$log )
-			Log::write(Metrics::LOG, m);
+		Log::write(LOG, m);
 		}
 	}
 
@@ -401,7 +435,7 @@ function add_filter(id: string, filter: Filter)
 	{
 	if ( filter?$normalize_func && filter?$aggregation_mask )
 		{
-		Reporter::warning(fmt("invalid Metric filter (%s): Defined $normalize_func and $aggregation_mask.", filter$name));
+		Reporter::warning(fmt("invalid Metric filter (%s): Defined both $normalize_func and $aggregation_mask.", filter$name));
 		return;
 		}
 	if ( [id, filter$name] in store )
 		{
 		Reporter::warning(fmt("invalid Metric filter (%s): Filter with same name already exists.", filter$name));
 		return;
 		}
-
+	if ( filter?$rollup )
+		{
+		if ( filter$rollup !in rollups )
+			{
+			Reporter::warning(fmt("invalid Metric filter (%s): %s rollup doesn't exist.", filter$name, filter$rollup));
+			return;
+			}
+		else
+			{
+			local every_field = 0secs;
+			for ( filt in rollups )
+				{
+				if ( [id, filt] !in filter_store )
+					next;
+
+				if ( every_field == 0secs )
+					every_field = filter_store[id, filt]$every;
+				else if ( every_field != filter_store[id, filt]$every )
+					{
+					Reporter::warning(fmt("invalid Metric rollup for %s: Filters with differing $every fields applied to %s.", filter$name, filter$rollup));
+					return;
+					}
+				}
+			}
+		add rollups[filter$rollup]$filters[filter];
+		}
+
 	if ( ! filter?$id )
 		filter$id = id;
 
@@ -419,8 +479,8 @@ function add_filter(id: string, filter: Filter)
 	filter_store[id, filter$name] = filter;
 	store[id, filter$name] = table();
-
-	schedule filter$every { Metrics::log_it(filter) };
+
+	schedule filter$every { Metrics::finish_period(filter) };
 	}
 
 function add_data(id: string, index: Index, data: DataPoint)
@@ -513,11 +573,11 @@ function add_data(id: string, index: Index, data: DataPoint)
 			result$var_s += (val - result$prev_avg)*(val - result$avg);
 			}
 
-		if ( STD_DEV in filter$measure )
-			{
-			#if ( result?$variance )
-			#	result$std_dev = sqrt(result$variance);
-			}
+		#if ( STD_DEV in filter$measure )
+		#	{
+		#	#if ( result?$variance )
+		#	#	result$std_dev = sqrt(result$variance);
+		#	}
 
 		if ( UNIQUE in filter$measure )
 			{
@@ -530,8 +590,7 @@ function add_data(id: string, index: Index, data: DataPoint)
 		}
 	}
 
-# This function checks if a threshold has been crossed and generates a
-# notice if it has.  It is also used as a method to implement
+# This function checks if a threshold has been crossed.  It is also used as a method to implement
 # mid-break-interval threshold crossing detection for cluster deployments.
 function check_thresholds(filter: Filter, index: Index, val: ResultVal, modify_pct: double): bool
 	{
@@ -570,7 +629,7 @@ function check_thresholds(filter: Filter, index: Index, val: ResultVal, modify_p
 
 	return F;
 	}
-	
+
 function threshold_crossed(filter: Filter, index: Index, val: ResultVal)
 	{
 	if ( ! filter?$threshold_crossed )
@@ -586,3 +645,10 @@ function threshold_crossed(filter: Filter, index: Index, val: ResultVal)
 	if ( filter?$threshold_series )
 		++val$threshold_series_index;
 	}
+
+function create_index_rollup(name: string, rollup: RollupCallback)
+	{
+	local r: Rollup = [$callback=rollup];
+	r$filters=set();
+	rollups[name] = r;
+	}
diff --git a/scripts/base/frameworks/metrics/non-cluster.bro b/scripts/base/frameworks/metrics/non-cluster.bro
index a94370d776..b76ca3ea48 100644
--- a/scripts/base/frameworks/metrics/non-cluster.bro
+++ b/scripts/base/frameworks/metrics/non-cluster.bro
@@ -2,15 +2,31 @@
 
 module Metrics;
 
-event Metrics::log_it(filter: Filter)
+event Metrics::finish_period(filter: Filter)
 	{
-	local id = filter$id;
-	local name = filter$name;
+	local data = store[filter$id, filter$name];
+	if ( filter?$rollup )
+		{
+		for ( index in data )
+			{
+			if ( index !in rollup_store )
+				rollup_store[index] = table();
+			rollup_store[index][filter$id, filter$name] = data[index];
+
+			# If all of the result vals are stored then the rollup callback can be executed.
+			if ( |rollup_store[index]| == |rollups[filter$rollup]$filters| )
+				{
+				rollups[filter$rollup]$callback(index, rollup_store[index]);
+				}
+			}
+		}
+
+	if ( filter?$period_finished )
+		filter$period_finished(network_time(), filter$id, filter$name, data);
 
-	write_log(network_time(), filter, store[id, name]);
 	reset(filter);
 
-	schedule filter$every { Metrics::log_it(filter) };
+	schedule filter$every { Metrics::finish_period(filter) };
 	}
 
diff --git a/scripts/policy/frameworks/metrics/conn-example.bro b/scripts/policy/frameworks/metrics/conn-example.bro
index e5c604a5b2..1271d6eb32 100644
--- a/scripts/policy/frameworks/metrics/conn-example.bro
+++ b/scripts/policy/frameworks/metrics/conn-example.bro
@@ -7,11 +7,16 @@
 event bro_init()
 	{
 	#Metrics::add_filter("conns.originated", [$aggregation_mask=24, $break_interval=1mins]);
-	Metrics::add_filter("conns.originated", [$aggregation_table=Site::local_nets_table, $break_interval=1mins]);
+	Metrics::add_filter("conns.originated", [$every=1mins, $measure=set(Metrics::SUM),
+	                                         $aggregation_table=Site::local_nets_table,
+	                                         $period_finished=Metrics::write_log]);
 
 	# Site::local_nets must be defined in order for this to actually do anything.
-	Metrics::add_filter("conns.responded", [$aggregation_table=Site::local_nets_table, $break_interval=1mins]);
+	Metrics::add_filter("conns.responded", [$every=1mins, $measure=set(Metrics::SUM),
+	                                        $aggregation_table=Site::local_nets_table,
+	                                        $period_finished=Metrics::write_log]);
+
 	}
 
 event connection_established(c: connection)
diff --git a/scripts/policy/frameworks/metrics/http-example.bro b/scripts/policy/frameworks/metrics/http-example.bro
index 3c60f3c931..b3284580e8 100644
--- a/scripts/policy/frameworks/metrics/http-example.bro
+++ b/scripts/policy/frameworks/metrics/http-example.bro
@@ -8,15 +8,16 @@
 
 event bro_init()
 	{
-	# TODO: these are waiting on a fix with table vals + records before they will work.
-	#Metrics::add_filter(HTTP_REQUESTS_BY_HOST_HEADER,
-	#                    [$pred(index: Metrics::Index) = { return Site::is_local_addr(index$host); },
-	#                     $aggregation_mask=24,
-	#                     $break_interval=1min]);
+	Metrics::add_filter("http.request.by_host_header",
+	                    [$every=1min, $measure=set(Metrics::SUM),
+	                     $pred(index: Metrics::Index, data: Metrics::DataPoint) = { return T; return Site::is_local_addr(index$host); },
+	                     $aggregation_mask=24,
+	                     $period_finished=Metrics::write_log]);
 
 	# Site::local_nets must be defined in order for this to actually do anything.
-	Metrics::add_filter("http.request.by_status_code", [$aggregation_table=Site::local_nets_table,
-	                                                    $break_interval=1min]);
+	Metrics::add_filter("http.request.by_status_code", [$every=1min, $measure=set(Metrics::SUM),
+	                                                    $aggregation_table=Site::local_nets_table,
+	                                                    $period_finished=Metrics::write_log]);
 	}
 
 event HTTP::log_http(rec: HTTP::Info)
diff --git a/scripts/policy/frameworks/metrics/ssl-example.bro b/scripts/policy/frameworks/metrics/ssl-example.bro
index 64e63bc215..3b9b848edb 100644
--- a/scripts/policy/frameworks/metrics/ssl-example.bro
+++ b/scripts/policy/frameworks/metrics/ssl-example.bro
@@ -10,11 +10,10 @@
 event bro_init()
 	{
 	Metrics::add_filter("ssl.by_servername",
 	                    [$name="no-google-ssl-servers",
+	                     $every=10secs, $measure=set(Metrics::SUM),
 	                     $pred(index: Metrics::Index, data: Metrics::DataPoint) = {
 	                     	return (/google\.com$/ !in index$str);
-	                     },
-	                     $break_interval=10secs
-	                     ]);
+	                     }]);
 	}
 
 event SSL::log_ssl(rec: SSL::Info)
diff --git a/scripts/policy/misc/app-metrics.bro b/scripts/policy/misc/app-metrics.bro
index a89d0d8eb3..d88eb8fe6e 100644
--- a/scripts/policy/misc/app-metrics.bro
+++ b/scripts/policy/misc/app-metrics.bro
@@ -1,49 +1,80 @@
 @load base/protocols/http
 @load base/protocols/ssl
-
 @load base/frameworks/metrics
 
 module AppMetrics;
 
 export {
-	## The metric break interval for the default stats collected by this script.
-	const break_interval = 1hr &redef;
+	redef enum Log::ID += { LOG };
+
+	type Info: record {
+		ts:         time   &log;
+		app:        string &log;
+		uniq_hosts: count  &log;
+		hits:       count  &log;
+		bytes:      count  &log;
+	};
+
+	## The frequency of logging the stats collected by this script.
+	const break_interval = 1min &redef;
 }
 
+function app_metrics_rollup(index: Metrics::Index, vals: table[string, string] of Metrics::ResultVal)
+	{
+	local l: Info;
+	l$ts = network_time();
+	for ( [metric_name, filter_name] in vals )
+		{
+		local val = vals[metric_name, filter_name];
+		l$app = index$str;
+		if ( metric_name == "apps.bytes" )
+			l$bytes = double_to_count(floor(val$sum));
+		else if ( metric_name == "apps.hits" )
+			{
+			l$hits = val$num;
+			l$uniq_hosts = val$unique;
+			}
+		}
+	Log::write(LOG, l);
+	}
+
 event bro_init() &priority=3
 	{
-	Metrics::add_filter("apps.bytes", [$every=break_interval, $measure=set(Metrics::SUM)]);
-	Metrics::add_filter("apps.hits", [$every=break_interval, $measure=set(Metrics::SUM, Metrics::UNIQUE)]);
+	Log::create_stream(AppMetrics::LOG, [$columns=Info]);
+
+	Metrics::create_index_rollup("AppMetrics", app_metrics_rollup);
+	Metrics::add_filter("apps.bytes", [$every=break_interval, $measure=set(Metrics::SUM), $period_finished=Metrics::write_log, $rollup="AppMetrics"]);
+	Metrics::add_filter("apps.hits", [$every=break_interval, $measure=set(Metrics::UNIQUE), $rollup="AppMetrics"]);
 	}
 
 function do_metric(id: conn_id, hostname: string, size: count)
 	{
-	if ( /youtube/ in hostname && size > 512*1024 )
+	if ( /youtube\.com$/ in hostname && size > 512*1024 )
 		{
 		Metrics::add_data("apps.bytes", [$str="youtube"], [$num=size]);
 		Metrics::add_data("apps.hits", [$str="youtube"], [$str=cat(id$orig_h)]);
 		}
-	else if ( /facebook.com|fbcdn.net/ in hostname && size > 20 )
+	else if ( /(\.facebook\.com|\.fbcdn\.net)$/ in hostname && size > 20 )
 		{
 		Metrics::add_data("apps.bytes", [$str="facebook"], [$num=size]);
 		Metrics::add_data("apps.hits", [$str="facebook"], [$str=cat(id$orig_h)]);
 		}
-	else if ( /google.com/ in hostname && size > 20 )
+	else if ( /\.google\.com$/ in hostname && size > 20 )
 		{
 		Metrics::add_data("apps.bytes", [$str="google"], [$num=size]);
 		Metrics::add_data("apps.hits", [$str="google"], [$str=cat(id$orig_h)]);
 		}
-	else if ( /nflximg.com/ in hostname && size > 200*1024 )
+	else if ( /nflximg\.com$/ in hostname && size > 200*1024 )
 		{
 		Metrics::add_data("apps.bytes", [$str="netflix"], [$num=size]);
 		Metrics::add_data("apps.hits", [$str="netflix"], [$str=cat(id$orig_h)]);
 		}
-	else if ( /pandora.com/ in hostname && size > 512*1024 )
+	else if ( /\.(pandora|p-cdn)\.com$/ in hostname && size > 512*1024 )
 		{
 		Metrics::add_data("apps.bytes", [$str="pandora"], [$num=size]);
 		Metrics::add_data("apps.hits", [$str="pandora"], [$str=cat(id$orig_h)]);
 		}
-	else if ( /gmail.com/ in hostname && size > 20 )
+	else if ( /gmail\.com$/ in hostname && size > 20 )
 		{
 		Metrics::add_data("apps.bytes", [$str="gmail"], [$num=size]);
 		Metrics::add_data("apps.hits", [$str="gmail"], [$str=cat(id$orig_h)]);
@@ -63,7 +94,7 @@ event ssl_established(c: connection)
 event connection_finished(c: connection)
 	{
 	if ( c?$resp_hostname )
-		do_metric(c$id, c$resp_hostname, c$resp$num_bytes_ip);
+		do_metric(c$id, c$resp_hostname, c$resp$size);
 	}
 
 event HTTP::log_http(rec: HTTP::Info)
diff --git a/scripts/policy/misc/capture-loss.bro b/scripts/policy/misc/capture-loss.bro
index b2d23020f8..1f0726299d 100644
--- a/scripts/policy/misc/capture-loss.bro
+++ b/scripts/policy/misc/capture-loss.bro
@@ -8,7 +8,6 @@
 ##! for a sequence number that's above a gap).
 
 @load base/frameworks/notice
-@load base/frameworks/metrics
 
 module CaptureLoss;
 
diff --git a/scripts/policy/misc/scan.bro b/scripts/policy/misc/scan.bro
index 5a8e3f7830..a0228a7955 100644
--- a/scripts/policy/misc/scan.bro
+++ b/scripts/policy/misc/scan.bro
@@ -43,6 +43,10 @@ export {
 	## Custom threholds based on service for address scan.  This is primarily
 	## useful for setting reduced thresholds for specific ports.
 	const addr_scan_custom_thresholds: table[port] of count &redef;
+
+	global Scan::addr_scan_policy: hook(scanner: addr, victim: addr, scanned_port: port);
+
+	global Scan::port_scan_policy: hook(scanner: addr, victim: addr, scanned_port: port);
 }
 
@@ -94,16 +98,14 @@ function port_scan_threshold_crossed(index: Metrics::Index, val: Metrics::Result
 event bro_init() &priority=5
 	{
 	# Note: addr scans are trcked similar to: table[src_ip, port] of set(dst);
-	Metrics::add_filter("scan.addr.fail", [$log=F,
-	                                       $every=addr_scan_interval,
+	Metrics::add_filter("scan.addr.fail", [$every=addr_scan_interval,
 	                                       $measure=set(Metrics::UNIQUE),
 	                                       $threshold_func=check_addr_scan_threshold,
 	                                       $threshold=addr_scan_threshold,
 	                                       $threshold_crossed=addr_scan_threshold_crossed]);
 
 	# Note: port scans are tracked similar to: table[src_ip, dst_ip] of set(port);
-	Metrics::add_filter("scan.port.fail", [$log=F,
-	                                       $every=port_scan_interval,
+	Metrics::add_filter("scan.port.fail", [$every=port_scan_interval,
 	                                       $measure=set(Metrics::UNIQUE),
 	                                       $threshold=port_scan_threshold,
 	                                       $threshold_crossed=port_scan_threshold_crossed]);
@@ -146,11 +148,11 @@ function add_metrics(id: conn_id, reverse: bool)
 	#if ( |analyze_subnets| > 0 && host !in analyze_subnets )
 	#	return F;
 
-	# Probably do a hook point here?
-	Metrics::add_data("scan.addr.fail", [$host=scanner, $str=cat(scanned_port)], [$str=cat(victim)]);
+	if ( hook Scan::addr_scan_policy(scanner, victim, scanned_port) )
+		Metrics::add_data("scan.addr.fail", [$host=scanner, $str=cat(scanned_port)], [$str=cat(victim)]);
 
-	# Probably do a hook point here?
- Metrics::add_data("scan.port.fail", [$host=scanner, $str=cat(victim)], [$str=cat(scanned_port)]); + if ( hook Scan::port_scan_policy(scanner, victim, scanned_port) ) + Metrics::add_data("scan.port.fail", [$host=scanner, $str=cat(victim)], [$str=cat(scanned_port)]); } function is_failed_conn(c: connection): bool diff --git a/scripts/policy/protocols/conn/conn-stats-per-host.bro b/scripts/policy/protocols/conn/conn-stats-per-host.bro index df58081163..fad2331f44 100644 --- a/scripts/policy/protocols/conn/conn-stats-per-host.bro +++ b/scripts/policy/protocols/conn/conn-stats-per-host.bro @@ -6,10 +6,12 @@ event bro_init() &priority=5 { Metrics::add_filter("conn.orig.data", [$every=5mins, - $measure=set(Metrics::VARIANCE, Metrics::AVG, Metrics::MAX, Metrics::MIN, Metrics::STD_DEV)]); + $measure=set(Metrics::VARIANCE, Metrics::AVG, Metrics::MAX, Metrics::MIN, Metrics::STD_DEV), + $period_finished=Metrics::write_log]); Metrics::add_filter("conn.resp.data", [$every=5mins, - $measure=set(Metrics::VARIANCE, Metrics::AVG, Metrics::MAX, Metrics::MIN, Metrics::STD_DEV)]); + $measure=set(Metrics::VARIANCE, Metrics::AVG, Metrics::MAX, Metrics::MIN, Metrics::STD_DEV), + $period_finished=Metrics::write_log]); } diff --git a/scripts/policy/protocols/conn/metrics.bro b/scripts/policy/protocols/conn/metrics.bro index 0fb5fa2134..057e23e088 100644 --- a/scripts/policy/protocols/conn/metrics.bro +++ b/scripts/policy/protocols/conn/metrics.bro @@ -3,8 +3,10 @@ event bro_init() &priority=3 { - Metrics::add_filter("conns.country", [$every=1hr, $measure=set(Metrics::SUM)]); - Metrics::add_filter("hosts.active", [$every=1hr, $measure=set(Metrics::SUM)]); + Metrics::add_filter("conns.country", [$every=1hr, $measure=set(Metrics::SUM), + $period_finished=Metrics::write_log]); + Metrics::add_filter("hosts.active", [$every=1hr, $measure=set(Metrics::SUM), + $period_finished=Metrics::write_log]); } event connection_established(c: connection) &priority=3 diff --git a/testing/btest/Baseline/scripts.base.frameworks.metrics.basic-cluster/manager-1.metrics.log b/testing/btest/Baseline/scripts.base.frameworks.metrics.basic-cluster/manager-1.metrics.log index 98794673f1..bdc86c68bb 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.metrics.basic-cluster/manager-1.metrics.log +++ b/testing/btest/Baseline/scripts.base.frameworks.metrics.basic-cluster/manager-1.metrics.log @@ -3,10 +3,10 @@ #empty_field (empty) #unset_field - #path metrics -#open 2012-12-04-15-53-23 -#fields ts ts_delta filter_name metric index.str index.host index.network result.begin result.end result.num result.sum result.min result.max result.avg result.variance result.std_dev result.unique -#types time interval string string string addr subnet time time count double double double double double double count -1354636403.682565 3.000000 default test.metric - 6.5.4.3 - 1354636401.774655 1354636401.782720 2 6.0 1.0 5.0 3.0 4.0 2.0 - -1354636403.682565 3.000000 default test.metric - 1.2.3.4 - 1354636401.774655 1354636401.782720 9 437.0 3.0 95.0 48.555556 674.469136 25.970544 - -1354636403.682565 3.000000 default test.metric - 7.2.1.5 - 1354636401.774655 1354636401.782720 2 145.0 54.0 91.0 72.5 342.25 18.5 - -#close 2012-12-04-15-53-23 +#open 2012-12-17-18-43-15 +#fields ts ts_delta metric index.str index.host index.network result.begin result.end result.num result.sum result.min result.max result.avg result.variance result.std_dev result.unique +#types time interval string string addr subnet time time count double double double double double double count 
+1355769795.365325	3.000000	test.metric	-	6.5.4.3	-	1355769793.449322	1355769793.458467	2	6.0	1.0	5.0	3.0	4.0	2.0	2
+1355769795.365325	3.000000	test.metric	-	1.2.3.4	-	1355769793.449322	1355769793.458467	9	437.0	3.0	95.0	48.555556	674.469136	25.970544	8
+1355769795.365325	3.000000	test.metric	-	7.2.1.5	-	1355769793.449322	1355769793.458467	2	145.0	54.0	91.0	72.5	342.25	18.5	2
+#close	2012-12-17-18-43-21
diff --git a/testing/btest/Baseline/scripts.base.frameworks.metrics.basic/metrics.log b/testing/btest/Baseline/scripts.base.frameworks.metrics.basic/metrics.log
index 63bf7c95fb..51d892e8d5 100644
--- a/testing/btest/Baseline/scripts.base.frameworks.metrics.basic/metrics.log
+++ b/testing/btest/Baseline/scripts.base.frameworks.metrics.basic/metrics.log
@@ -3,10 +3,10 @@
 #empty_field	(empty)
 #unset_field	-
 #path	metrics
-#open	2012-12-04-15-55-13
-#fields	ts	ts_delta	filter_name	metric	index.str	index.host	index.network	result.begin	result.end	result.num	result.sum	result.min	result.max	result.avg	result.variance	result.std_dev	result.unique
-#types	time	interval	string	string	string	addr	subnet	time	time	count	double	double	double	double	double	double	count
-1354636513.492214	3.000000	foo-bar	test.metric	-	6.5.4.3	-	1354636513.492214	1354636513.492214	1	2.0	2.0	2.0	2.0	0.0	0.0	-
-1354636513.492214	3.000000	foo-bar	test.metric	-	1.2.3.4	-	1354636513.492214	1354636513.492214	5	221.0	5.0	94.0	44.2	915.36	30.254917	-
-1354636513.492214	3.000000	foo-bar	test.metric	-	7.2.1.5	-	1354636513.492214	1354636513.492214	1	1.0	1.0	1.0	1.0	0.0	0.0	-
-#close	2012-12-04-15-55-13
+#open	2012-12-17-18-43-45
+#fields	ts	ts_delta	metric	index.str	index.host	index.network	result.begin	result.end	result.num	result.sum	result.min	result.max	result.avg	result.variance	result.std_dev	result.unique
+#types	time	interval	string	string	addr	subnet	time	time	count	double	double	double	double	double	double	count
+1355769825.947161	3.000000	test.metric	-	6.5.4.3	-	1355769825.947161	1355769825.947161	1	2.0	2.0	2.0	2.0	0.0	0.0	-
+1355769825.947161	3.000000	test.metric	-	1.2.3.4	-	1355769825.947161	1355769825.947161	5	221.0	5.0	94.0	44.2	915.36	30.254917	-
+1355769825.947161	3.000000	test.metric	-	7.2.1.5	-	1355769825.947161	1355769825.947161	1	1.0	1.0	1.0	1.0	0.0	0.0	-
+#close	2012-12-17-18-43-45
diff --git a/testing/btest/scripts/base/frameworks/metrics/basic-cluster.bro b/testing/btest/scripts/base/frameworks/metrics/basic-cluster.bro
index 41ef9b57dc..c68a4f7beb 100644
--- a/testing/btest/scripts/base/frameworks/metrics/basic-cluster.bro
+++ b/testing/btest/scripts/base/frameworks/metrics/basic-cluster.bro
@@ -19,11 +19,23 @@ redef Cluster::nodes = {
 
 redef Log::default_rotation_interval = 0secs;
 
+global n = 0;
+
 event bro_init() &priority=5
 	{
 	Metrics::add_filter("test.metric",
 	                    [$every=3secs,
-	                     $measure=set(Metrics::SUM, Metrics::MIN, Metrics::MAX, Metrics::AVG, Metrics::STD_DEV, Metrics::VARIANCE)]);
+	                     $measure=set(Metrics::SUM, Metrics::MIN, Metrics::MAX, Metrics::AVG, Metrics::STD_DEV, Metrics::VARIANCE, Metrics::UNIQUE),
+	                     $period_finished(ts: time, metric_name: string, filter_name: string, data: Metrics::MetricTable) =
+	                     	{
+	                     	Metrics::write_log(ts, metric_name, filter_name, data);
+	                     	if ( ++n == 3 )
+	                     		{
+	                     		terminate_communication();
+	                     		terminate();
+	                     		}
+	                     	}
+	                     ]);
 	}
 
 event remote_connection_closed(p: event_peer)
@@ -64,22 +76,10 @@ event ready_for_data()
 
 @if ( Cluster::local_node_type() == Cluster::MANAGER )
 
-global n = 0;
 global peer_count = 0;
 
-event Metrics::log_metrics(rec: Metrics::Info)
-	{
-	++n;
-	if ( n == 3 )
-		{
-		terminate_communication();
-		terminate();
-		}
-	}
-
 event remote_connection_handshake_done(p: event_peer)
 	{
-	print p;
 	++peer_count;
 	if ( peer_count == 3 )
 		event ready_for_data();
diff --git a/testing/btest/scripts/base/frameworks/metrics/basic.bro b/testing/btest/scripts/base/frameworks/metrics/basic.bro
index 12163ed689..e665f2ea5c 100644
--- a/testing/btest/scripts/base/frameworks/metrics/basic.bro
+++ b/testing/btest/scripts/base/frameworks/metrics/basic.bro
@@ -6,7 +6,9 @@ event bro_init() &priority=5
 	Metrics::add_filter("test.metric",
 	                    [$name="foo-bar",
 	                     $every=3secs,
-	                     $measure=set(Metrics::SUM, Metrics::VARIANCE, Metrics::AVG, Metrics::MAX, Metrics::MIN, Metrics::STD_DEV)]);
+	                     $measure=set(Metrics::SUM, Metrics::VARIANCE, Metrics::AVG, Metrics::MAX, Metrics::MIN, Metrics::STD_DEV),
+	                     $period_finished=Metrics::write_log]);
+
 	Metrics::add_data("test.metric", [$host=1.2.3.4], [$num=5]);
 	Metrics::add_data("test.metric", [$host=1.2.3.4], [$num=22]);
 	Metrics::add_data("test.metric", [$host=1.2.3.4], [$num=94]);
diff --git a/testing/btest/scripts/base/frameworks/metrics/cluster-intermediate-update.bro b/testing/btest/scripts/base/frameworks/metrics/cluster-intermediate-update.bro
index 3341fa1887..b16645dbe6 100644
--- a/testing/btest/scripts/base/frameworks/metrics/cluster-intermediate-update.bro
+++ b/testing/btest/scripts/base/frameworks/metrics/cluster-intermediate-update.bro
@@ -26,8 +26,7 @@ event bro_init() &priority=5
 	                     $threshold_crossed(index: Metrics::Index, val: Metrics::ResultVal) = {
 	                     	print "A test metric threshold was crossed!";
 	                     	terminate();
-	                     }
-	                     ]);
+	                     }]);
 	}
 
 event remote_connection_closed(p: event_peer)
diff --git a/testing/btest/scripts/base/frameworks/metrics/thresholding.bro b/testing/btest/scripts/base/frameworks/metrics/thresholding.bro
index bd0cd6faae..f39443fc2a 100644
--- a/testing/btest/scripts/base/frameworks/metrics/thresholding.bro
+++ b/testing/btest/scripts/base/frameworks/metrics/thresholding.bro
@@ -15,8 +15,7 @@ event bro_init() &priority=5
 	                     $threshold=5,
 	                     $threshold_crossed(index: Metrics::Index, val: Metrics::ResultVal) = {
 	                     	print fmt("THRESHOLD: hit a threshold value at %.0f for %s", val$sum, Metrics::index2str(index));
-	                     },
-	                     $log=F]);
+	                     }]);
 
 	Metrics::add_filter("test.metric",
 	                    [$name="foobar2",
@@ -25,8 +24,7 @@ event bro_init() &priority=5
 	                     $threshold_series=vector(3,6,800),
 	                     $threshold_crossed(index: Metrics::Index, val: Metrics::ResultVal) = {
 	                     	print fmt("THRESHOLD_SERIES: hit a threshold series value at %.0f for %s", val$sum, Metrics::index2str(index));
-	                     },
-	                     $log=F]);
+	                     }]);
 
 	Metrics::add_filter("test.metric",
 	                    [$every=3secs,
 	                     $measure=set(Metrics::SUM),
@@ -36,8 +34,7 @@ event bro_init() &priority=5
 	                     },
 	                     $threshold_crossed(index: Metrics::Index, val: Metrics::ResultVal) = {
 	                     	print fmt("THRESHOLD_FUNC: hit a threshold function value at %.0f for %s", val$sum, Metrics::index2str(index));
-	                     },
-	                     $log=F]);
+	                     }]);
 
 	Metrics::add_data("test.metric", [$host=1.2.3.4], [$num=3]);
 	Metrics::add_data("test.metric", [$host=6.5.4.3], [$num=2]);
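
Usage sketch for the new $period_finished callback introduced by this patch.  This is a minimal, hypothetical example (the metric name "example.hits" and the printed output are made up for illustration and are not part of the patch); it shows a filter that either reuses Metrics::write_log or supplies its own per-period callback, mirroring the pattern used in the updated tests.

@load base/frameworks/metrics

event bro_init()
	{
	Metrics::add_filter("example.hits",
	                    [$every=5mins,
	                     $measure=set(Metrics::SUM, Metrics::UNIQUE),
	                     # The stock logger could be used instead:
	                     #$period_finished=Metrics::write_log,
	                     # ... or a custom callback for domain-specific output:
	                     $period_finished(ts: time, metric_name: string, filter_name: string, data: Metrics::MetricTable) =
	                     	{
	                     	for ( index in data )
	                     		# $num is always tracked; $sum is only set when SUM is measured.
	                     		print fmt("%s/%s %s -> n=%d sum=%.0f", metric_name, filter_name,
	                     		          Metrics::index2str(index), data[index]$num,
	                     		          data[index]?$sum ? data[index]$sum : 0.0);
	                     	}]);
	}

event connection_established(c: connection)
	{
	# Count one hit per originator.
	Metrics::add_data("example.hits", [$host=c$id$orig_h], [$num=1]);
	}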
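A similar hypothetical sketch of the new rollup API, modeled on the app-metrics.bro changes above: two filters over different metrics share the same $every interval and $rollup name, and the rollup callback fires for an index once a ResultVal has been collected from every filter registered with that rollup (the metric names and printed summary are illustrative only).

@load base/frameworks/metrics

event bro_init()
	{
	Metrics::create_index_rollup("example-rollup",
		function(index: Metrics::Index, vals: table[string, string] of Metrics::ResultVal)
			{
			# vals is indexed by [metric name, filter name], as in app_metrics_rollup.
			for ( [metric_name, filter_name] in vals )
				print fmt("%s: %s/%s num=%d", Metrics::index2str(index),
				          metric_name, filter_name, vals[metric_name, filter_name]$num);
			});

	# Filters registered under the same $rollup are expected to use the same $every interval.
	Metrics::add_filter("example.bytes", [$every=1hr, $measure=set(Metrics::SUM), $rollup="example-rollup"]);
	Metrics::add_filter("example.hits", [$every=1hr, $measure=set(Metrics::UNIQUE), $rollup="example-rollup"]);
	}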