diff --git a/CHANGES b/CHANGES index e3d20b84b6..03b067f290 100644 --- a/CHANGES +++ b/CHANGES @@ -1,4 +1,38 @@ +2.1-498 | 2013-05-03 17:44:08 -0700 + + * Table lookups return copy of non-const &default vals. This + prevents unintentional modifications to the &default value itself. + Addresses #981. (Jon Siwek) + +2.1-496 | 2013-05-03 15:54:47 -0700 + + * Fix memory leak and unnecessary allocations in OpaqueVal. + Addresses #986. (Matthias Vallentin) + +2.1-492 | 2013-05-02 12:46:26 -0700 + + * Work-around for sumstats framework not propagating updates after + intermediate check in cluster environments. (Bernhard Amann) + + * Always apply tcp_connection_attempt. Before this change it was + only applied when a connection_attempt() event handler was + defined. (Robin Sommer) + + * Fixing coverage.bare-mode-errors test. (Robin Sommer) + +2.1-487 | 2013-05-01 18:03:22 -0700 + + * Always apply tcp_connection_attempt timer, even if no + connection_attempt() event handler is defined. (Robin Sommer) + +2.1-486 | 2013-05-01 15:28:45 -0700 + + * New framework for computing summary statistics in + base/frameworks/sumstats. This replaces the metrics framework, and + comes with a number of applications built on top, see NEWS. More + documentation to follow. (Seth Hall) + 2.1-397 | 2013-04-29 21:19:00 -0700 * Fixing memory leaks in CompHash implementation. Addresses #987. diff --git a/NEWS b/NEWS index 8605dcdbd4..4c0e2b45cc 100644 --- a/NEWS +++ b/NEWS @@ -126,6 +126,9 @@ Changed Functionality - Removed the byte_len() and length() bif functions. Use the "|...|" operator instead. +- The SSH::Login notice has been superseded by a corresponding + intelligence framework observation (SSH::SUCCESSFUL_LOGIN). + Bro 2.1 ------- @@ -209,6 +212,27 @@ New Functionality outputs. We do not yet recommend them for production (but welcome feedback!) +- Summary statistics framework. [Extend] + +- A number of new applications built on top of the summary statistics + framework: + + * Scan detection: Detectors for port and address scans are back. See + policy/misc/scan.bro. + + * Traceroute detector: policy/misc/detect-traceroute + + * Web application detection/measurement: policy/misc/app-metrics.bro + + * FTP brute-forcing detector: policy/protocols/ftp/detect-bruteforcing.bro + + * HTTP-based SQL injection detector: policy/protocols/http/detect-sqli.bro + (existed before, but now ported to the new framework) + + * SSH brute-forcing detector feeding the intelligence framework: + policy/protocols/ssh/detect-bruteforcing.bro + + Changed Functionality ~~~~~~~~~~~~~~~~~~~~~ diff --git a/VERSION b/VERSION index 4809e9f2e9..7de1d6c4ee 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.1-397 +2.1-498 diff --git a/scripts/base/frameworks/sumstats/cluster.bro b/scripts/base/frameworks/sumstats/cluster.bro index 405395a687..be0a5b5ded 100644 --- a/scripts/base/frameworks/sumstats/cluster.bro +++ b/scripts/base/frameworks/sumstats/cluster.bro @@ -10,49 +10,48 @@ module SumStats; export { - ## Allows a user to decide how large of result groups the - ## workers should transmit values for cluster stats aggregation. + ## Allows a user to decide how large the result groups should be that the + ## workers transmit for cluster stats aggregation. const cluster_send_in_groups_of = 50 &redef; - - ## The percent of the full threshold value that needs to be met - ## on a single worker for that worker to send the value to its manager in - ## order for it to request a global view for that value.
There is no - ## requirement that the manager requests a global view for the key - ## since it may opt not to if it requested a global view for the key - ## recently. + + ## The percent of the full threshold value that needs to be met on a single worker + ## for that worker to send the value to its manager in order for it to request a + ## global view for that value. There is no requirement that the manager requests + ## a global view for the key since it may opt not to if it requested a global view + ## for the key recently. const cluster_request_global_view_percent = 0.2 &redef; ## This is to deal with intermediate update overload. A manager will only allow - ## this many intermediate update requests to the workers to be inflight at - ## any given time. Requested intermediate updates are currently thrown out - ## and not performed. In practice this should hopefully have a minimal effect. + ## this many intermediate update requests to the workers to be inflight at any + ## given time. Requested intermediate updates are currently thrown out and not + ## performed. In practice this should hopefully have a minimal effect. const max_outstanding_global_views = 10 &redef; - ## Intermediate updates can cause overload situations on very large clusters. - ## This option may help reduce load and correct intermittent problems. - ## The goal for this option is also meant to be temporary. + ## Intermediate updates can cause overload situations on very large clusters. This + ## option may help reduce load and correct intermittent problems. The goal for this + ## option is also meant to be temporary. const enable_intermediate_updates = T &redef; - ## Event sent by the manager in a cluster to initiate the - ## collection of values for a sumstat. + ## Event sent by the manager in a cluster to initiate the collection of values for + ## a sumstat. global cluster_ss_request: event(uid: string, ssid: string); - ## Event sent by nodes that are collecting sumstats after receiving - ## a request for the sumstat from the manager. + ## Event sent by nodes that are collecting sumstats after receiving a request for + ## the sumstat from the manager. global cluster_ss_response: event(uid: string, ssid: string, data: ResultTable, done: bool); - ## This event is sent by the manager in a cluster to initiate the - ## collection of a single key value from a sumstat. It's typically - ## used to get intermediate updates before the break interval triggers - ## to speed detection of a value crossing a threshold. + ## This event is sent by the manager in a cluster to initiate the collection of + ## a single key value from a sumstat. It's typically used to get intermediate + ## updates before the break interval triggers to speed detection of a value + ## crossing a threshold. global cluster_key_request: event(uid: string, ssid: string, key: Key); - ## This event is sent by nodes in response to a + ## This event is sent by nodes in response to a ## :bro:id:`SumStats::cluster_key_request` event. 
global cluster_key_response: event(uid: string, ssid: string, key: Key, result: Result); - ## This is sent by workers to indicate that they crossed the percent of the - ## current threshold by the percentage defined globally in + ## This is sent by workers to indicate that they crossed the percent + ## of the current threshold by the percentage defined globally in ## :bro:id:`SumStats::cluster_request_global_view_percent` global cluster_key_intermediate_response: event(ssid: string, key: SumStats::Key); @@ -69,7 +68,7 @@ redef Cluster::manager2worker_events += /SumStats::thresholds_reset/; redef Cluster::worker2manager_events += /SumStats::cluster_(ss_response|key_response|key_intermediate_response)/; @if ( Cluster::local_node_type() != Cluster::MANAGER ) -# This variable is maintained to know what keys have recently sent as +# This variable is maintained to know what keys have recently sent as # intermediate updates so they don't overwhelm their manager. The count that is # yielded is the number of times the percentage threshold has been crossed and # an intermediate result has been received. @@ -82,7 +81,7 @@ event bro_init() &priority=-100 reducer_store = table(); } -# This is done on all non-manager node types in the event that a sumstat is +# This is done on all non-manager node types in the event that a sumstat is # being collected somewhere other than a worker. function data_added(ss: SumStat, key: Key, result: Result) { @@ -92,9 +91,9 @@ function data_added(ss: SumStat, key: Key, result: Result) return; # If val is 5 and global view % is 0.1 (10%), pct_val will be 50. If that - # crosses the full threshold then it's a candidate to send as an + # crosses the full threshold then it's a candidate to send as an # intermediate update. - if ( enable_intermediate_updates && + if ( enable_intermediate_updates && check_thresholds(ss, key, result, cluster_request_global_view_percent) ) { # kick off intermediate update @@ -113,19 +112,21 @@ event SumStats::send_data(uid: string, ssid: string, data: ResultTable) { local_data[key] = data[key]; delete data[key]; - + # Only send cluster_send_in_groups_of at a time. Queue another # event to send the next group. if ( cluster_send_in_groups_of == ++num_added ) break; } - + local done = F; # If data is empty, this sumstat is done. if ( |data| == 0 ) done = T; - - event SumStats::cluster_ss_response(uid, ssid, local_data, done); + + # Note: copy is needed to compensate serialization caching issue. This should be + # changed to something else later. + event SumStats::cluster_ss_response(uid, ssid, copy(local_data), done); if ( ! done ) schedule 0.01 sec { SumStats::send_data(uid, ssid, data) }; } @@ -133,7 +134,7 @@ event SumStats::send_data(uid: string, ssid: string, data: ResultTable) event SumStats::cluster_ss_request(uid: string, ssid: string) { #print fmt("WORKER %s: received the cluster_ss_request event for %s.", Cluster::node, id); - + # Initiate sending all of the data for the requested stats. 
if ( ssid in result_store ) event SumStats::send_data(uid, ssid, result_store[ssid]); @@ -145,13 +146,16 @@ event SumStats::cluster_ss_request(uid: string, ssid: string) if ( ssid in stats_store ) reset(stats_store[ssid]); } - + event SumStats::cluster_key_request(uid: string, ssid: string, key: Key) { if ( ssid in result_store && key in result_store[ssid] ) { #print fmt("WORKER %s: received the cluster_key_request event for %s=%s.", Cluster::node, key2str(key), data); - event SumStats::cluster_key_response(uid, ssid, key, result_store[ssid][key]); + + # Note: copy is needed to compensate serialization caching issue. This should be + # changed to something else later. + event SumStats::cluster_key_response(uid, ssid, key, copy(result_store[ssid][key])); } else { @@ -179,27 +183,27 @@ event SumStats::thresholds_reset(ssid: string) @if ( Cluster::local_node_type() == Cluster::MANAGER ) -# This variable is maintained by manager nodes as they collect and aggregate -# results. +# This variable is maintained by manager nodes as they collect and aggregate +# results. # Index on a uid. global stats_results: table[string] of ResultTable &read_expire=1min; # This variable is maintained by manager nodes to track how many "dones" they -# collected per collection unique id. Once the number of results for a uid -# matches the number of peer nodes that results should be coming from, the +# collected per collection unique id. Once the number of results for a uid +# matches the number of peer nodes that results should be coming from, the # result is written out and deleted from here. # Indexed on a uid. # TODO: add an &expire_func in case not all results are received. global done_with: table[string] of count &read_expire=1min &default=0; -# This variable is maintained by managers to track intermediate responses as -# they are getting a global view for a certain key. +# This variable is maintained by managers to track intermediate responses as +# they are getting a global view for a certain key. # Indexed on a uid. global key_requests: table[string] of Result &read_expire=1min; # This variable is maintained by managers to prevent overwhelming communication due -# to too many intermediate updates. Each sumstat is tracked separately so that -# one won't overwhelm and degrade other quieter sumstats. +# to too many intermediate updates. Each sumstat is tracked separately so that +# one won't overwhelm and degrade other quieter sumstats. # Indexed on a sumstat id. global outstanding_global_views: table[string] of count &default=0; @@ -211,11 +215,11 @@ event SumStats::finish_epoch(ss: SumStat) { #print fmt("%.6f MANAGER: breaking %s sumstat for %s sumstat", network_time(), ss$name, ss$id); local uid = unique_id(""); - + if ( uid in stats_results ) delete stats_results[uid]; stats_results[uid] = table(); - + # Request data from peers. event SumStats::cluster_ss_request(uid, ss$id); } @@ -224,7 +228,7 @@ event SumStats::finish_epoch(ss: SumStat) schedule ss$epoch { SumStats::finish_epoch(ss) }; } -# This is unlikely to be called often, but it's here in +# This is unlikely to be called often, but it's here in # case there are sumstats being collected by managers. 
function data_added(ss: SumStat, key: Key, result: Result) { @@ -234,7 +238,7 @@ function data_added(ss: SumStat, key: Key, result: Result) event SumStats::cluster_threshold_crossed(ss$id, key, threshold_tracker[ss$id][key]); } } - + event SumStats::cluster_key_response(uid: string, ssid: string, key: Key, result: Result) { #print fmt("%0.6f MANAGER: receiving key data from %s - %s=%s", network_time(), get_event_peer()$descr, key2str(key), result); @@ -277,7 +281,7 @@ event SumStats::cluster_key_intermediate_response(ssid: string, key: Key) if ( ssid in outstanding_global_views && |outstanding_global_views[ssid]| > max_outstanding_global_views ) { - # Don't do this intermediate update. Perhaps at some point in the future + # Don't do this intermediate update. Perhaps at some point in the future # we will queue and randomly select from these ignored intermediate # update requests. return; @@ -308,7 +312,7 @@ event SumStats::cluster_ss_response(uid: string, ssid: string, data: ResultTable local_data[key] = data[key]; # If a stat is done being collected, thresholds for each key - # need to be checked so we're doing it here to avoid doubly + # need to be checked so we're doing it here to avoid doubly # iterating over each key. if ( Cluster::worker_count == done_with[uid] ) { @@ -319,7 +323,7 @@ event SumStats::cluster_ss_response(uid: string, ssid: string, data: ResultTable } } } - + # If the data has been collected from all peers, we are done and ready to finish. if ( Cluster::worker_count == done_with[uid] ) { diff --git a/scripts/base/frameworks/sumstats/main.bro b/scripts/base/frameworks/sumstats/main.bro index ef7a34a4a4..6864966766 100644 --- a/scripts/base/frameworks/sumstats/main.bro +++ b/scripts/base/frameworks/sumstats/main.bro @@ -1,5 +1,5 @@ -##! The summary statistics framework provides a way to -##! summarize large streams of data into simple reduced +##! The summary statistics framework provides a way to +##! summarize large streams of data into simple reduced ##! measurements. module SumStats; @@ -10,24 +10,24 @@ export { PLACEHOLDER }; - ## Represents a thing which is having summarization + ## Represents a thing which is having summarization ## results collected for it. type Key: record { - ## A non-address related summarization or a sub-key for - ## an address based summarization. An example might be + ## A non-address related summarization or a sub-key for + ## an address based summarization. An example might be ## successful SSH connections by client IP address ## where the client string would be the key value. - ## Another example might be number of HTTP requests to - ## a particular value in a Host header. This is an - ## example of a non-host based metric since multiple - ## IP addresses could respond for the same Host + ## Another example might be number of HTTP requests to + ## a particular value in a Host header. This is an + ## example of a non-host based metric since multiple + ## IP addresses could respond for the same Host ## header value. str: string &optional; - + ## Host is the value to which this metric applies. host: addr &optional; }; - + ## Represents data being added for a single observation. ## Only supply a single field at a time! type Observation: record { @@ -40,17 +40,17 @@ export { }; type Reducer: record { - ## Observation stream identifier for the reducer + ## Observation stream identifier for the reducer ## to attach to. stream: string; ## The calculations to perform on the data points. 
apply: set[Calculation]; - - ## A predicate so that you can decide per key if you + + ## A predicate so that you can decide per key if you ## would like to accept the data being inserted. pred: function(key: SumStats::Key, obs: SumStats::Observation): bool &optional; - + ## A function to normalize the key. This can be used to aggregate or ## normalize the entire key. normalize_key: function(key: SumStats::Key): Key &optional; @@ -59,11 +59,11 @@ export { ## Value calculated for an observation stream fed into a reducer. ## Most of the fields are added by plugins. type ResultVal: record { - ## The time when the first observation was added to + ## The time when the first observation was added to ## this result value. begin: time; - ## The time when the last observation was added to + ## The time when the last observation was added to ## this result value. end: time; @@ -74,55 +74,56 @@ export { ## Type to store results for multiple reducers. type Result: table[string] of ResultVal; - ## Type to store a table of sumstats results indexed + ## Type to store a table of sumstats results indexed ## by keys. type ResultTable: table[Key] of Result; - ## SumStats represent an aggregation of reducers along with + ## SumStats represent an aggregation of reducers along with ## mechanisms to handle various situations like the epoch ending ## or thresholds being crossed. - ## It's best to not access any global state outside - ## of the variables given to the callbacks because there - ## is no assurance provided as to where the callbacks + ## + ## It's best to not access any global state outside + ## of the variables given to the callbacks because there + ## is no assurance provided as to where the callbacks ## will be executed on clusters. type SumStat: record { - ## The interval at which this filter should be "broken" - ## and the '$epoch_finished' callback called. The + ## The interval at which this filter should be "broken" + ## and the '$epoch_finished' callback called. The ## results are also reset at this time so any threshold - ## based detection needs to be set to a - ## value that should be expected to happen within + ## based detection needs to be set to a + ## value that should be expected to happen within ## this epoch. epoch: interval; ## The reducers for the SumStat reducers: set[Reducer]; - ## Provide a function to calculate a value from the - ## :bro:see:`Result` structure which will be used - ## for thresholding. + ## Provide a function to calculate a value from the + ## :bro:see:`Result` structure which will be used + ## for thresholding. ## This is required if a $threshold value is given. threshold_val: function(key: SumStats::Key, result: SumStats::Result): count &optional; - ## The threshold value for calling the + ## The threshold value for calling the ## $threshold_crossed callback. threshold: count &optional; - - ## A series of thresholds for calling the + + ## A series of thresholds for calling the ## $threshold_crossed callback. threshold_series: vector of count &optional; ## A callback that is called when a threshold is crossed. threshold_crossed: function(key: SumStats::Key, result: SumStats::Result) &optional; - - ## A callback with the full collection of Results for + + ## A callback with the full collection of Results for ## this SumStat. epoch_finished: function(rt: SumStats::ResultTable) &optional; }; - + ## Create a summary statistic. global create: function(ss: SumStats::SumStat); - ## Add data into an observation stream. 
This should be + ## Add data into an observation stream. This should be ## called when a script has measured some point value. ## ## id: The observation stream identifier that the data @@ -143,13 +144,13 @@ export { }; ## This event is generated when thresholds are reset for a SumStat. - ## + ## ## ssid: SumStats ID that thresholds were reset for. global thresholds_reset: event(ssid: string); - ## Helper function to represent a :bro:type:`SumStats::Key` value as + ## Helper function to represent a :bro:type:`SumStats::Key` value as ## a simple string. - ## + ## ## key: The metric key that is to be converted into a string. ## ## Returns: A string representation of the metric key. @@ -181,16 +182,17 @@ global result_store: table[string] of ResultTable = table(); # Store of threshold information. global thresholds_store: table[string, Key] of bool = table(); -# This is called whenever -# key values are updated and the new val is given as the `val` argument. -# It's only prototyped here because cluster and non-cluster have separate -# implementations. +# This is called whenever key values are updated and the new val is given as the +# `val` argument. It's only prototyped here because cluster and non-cluster have +# separate implementations. global data_added: function(ss: SumStat, key: Key, result: Result); # Prototype the hook point for plugins to do calculations. global observe_hook: hook(r: Reducer, val: double, data: Observation, rv: ResultVal); + # Prototype the hook point for plugins to initialize any result values. global init_resultval_hook: hook(r: Reducer, rv: ResultVal); + # Prototype the hook point for plugins to merge Results. global compose_resultvals_hook: hook(result: ResultVal, rv1: ResultVal, rv2: ResultVal); @@ -252,7 +254,7 @@ function compose_results(r1: Result, r2: Result): Result result[data_id] = r2[data_id]; } } - + return result; } @@ -306,25 +308,25 @@ function observe(id: string, key: Key, obs: Observation) if ( r?$normalize_key ) key = r$normalize_key(copy(key)); - # If this reducer has a predicate, run the predicate + # If this reducer has a predicate, run the predicate # and skip this key if the predicate return false. if ( r?$pred && ! r$pred(key, obs) ) next; - + local ss = stats_store[r$sid]; - + # If there is a threshold and no epoch_finished callback # we don't need to continue counting since the data will # never be accessed. This was leading - # to some state management issues when measuring + # to some state management issues when measuring # uniqueness. - # NOTE: this optimization could need removed in the + # NOTE: this optimization could need removed in the # future if on demand access is provided to the # SumStats results. if ( ! ss?$epoch_finished && r$sid in threshold_tracker && key in threshold_tracker[r$sid] && - ( ss?$threshold && + ( ss?$threshold && threshold_tracker[r$sid][key]$is_threshold_crossed ) || ( ss?$threshold_series && threshold_tracker[r$sid][key]$threshold_series_index+1 == |ss$threshold_series| ) ) @@ -356,7 +358,7 @@ function observe(id: string, key: Key, obs: Observation) } } -# This function checks if a threshold has been crossed. It is also used as a method to implement +# This function checks if a threshold has been crossed. It is also used as a method to implement # mid-break-interval threshold crossing detection for cluster deployments. 
function check_thresholds(ss: SumStat, key: Key, result: Result, modify_pct: double): bool { @@ -399,7 +401,7 @@ function check_thresholds(ss: SumStat, key: Key, result: Result, modify_pct: dou |ss$threshold_series| >= tt$threshold_series_index && watch >= ss$threshold_series[tt$threshold_series_index] ) { - # A threshold series was given and the value crossed the next + # A threshold series was given and the value crossed the next # value in the series. return T; } diff --git a/scripts/base/frameworks/sumstats/non-cluster.bro b/scripts/base/frameworks/sumstats/non-cluster.bro index 21386a246e..f27d4b5cfb 100644 --- a/scripts/base/frameworks/sumstats/non-cluster.bro +++ b/scripts/base/frameworks/sumstats/non-cluster.bro @@ -15,8 +15,8 @@ event SumStats::finish_epoch(ss: SumStat) schedule ss$epoch { SumStats::finish_epoch(ss) }; } - - + + function data_added(ss: SumStat, key: Key, result: Result) { if ( check_thresholds(ss, key, result, 1.0) ) diff --git a/scripts/base/frameworks/sumstats/plugins/average.bro b/scripts/base/frameworks/sumstats/plugins/average.bro index baabb8ca5e..ad82a91d20 100644 --- a/scripts/base/frameworks/sumstats/plugins/average.bro +++ b/scripts/base/frameworks/sumstats/plugins/average.bro @@ -1,9 +1,9 @@ -@load base/frameworks/sumstats +@load base/frameworks/sumstats/main module SumStats; export { - redef enum Calculation += { + redef enum Calculation += { ## Calculate the average of the values. AVERAGE }; @@ -33,4 +33,4 @@ hook compose_resultvals_hook(result: ResultVal, rv1: ResultVal, rv2: ResultVal) result$average = rv1$average; else if ( rv2?$average ) result$average = rv2$average; - } \ No newline at end of file + } diff --git a/scripts/base/frameworks/sumstats/plugins/max.bro b/scripts/base/frameworks/sumstats/plugins/max.bro index 532883d46e..f9ff9258ee 100644 --- a/scripts/base/frameworks/sumstats/plugins/max.bro +++ b/scripts/base/frameworks/sumstats/plugins/max.bro @@ -1,9 +1,9 @@ -@load base/frameworks/sumstats +@load base/frameworks/sumstats/main module SumStats; export { - redef enum Calculation += { + redef enum Calculation += { ## Find the maximum value. MAX }; @@ -18,7 +18,7 @@ hook observe_hook(r: Reducer, val: double, obs: Observation, rv: ResultVal) { if ( MAX in r$apply ) { - if ( ! rv?$max ) + if ( ! rv?$max ) rv$max = val; else if ( val > rv$max ) rv$max = val; diff --git a/scripts/base/frameworks/sumstats/plugins/min.bro b/scripts/base/frameworks/sumstats/plugins/min.bro index 2940b34a9b..95d492f428 100644 --- a/scripts/base/frameworks/sumstats/plugins/min.bro +++ b/scripts/base/frameworks/sumstats/plugins/min.bro @@ -1,9 +1,9 @@ -@load base/frameworks/sumstats +@load base/frameworks/sumstats/main module SumStats; export { - redef enum Calculation += { + redef enum Calculation += { ## Find the minimum value. MIN }; @@ -18,7 +18,7 @@ hook observe_hook(r: Reducer, val: double, obs: Observation, rv: ResultVal) { if ( MIN in r$apply ) { - if ( ! rv?$min ) + if ( ! 
rv?$min ) rv$min = val; else if ( val < rv$min ) rv$min = val; @@ -33,4 +33,4 @@ hook compose_resultvals_hook(result: ResultVal, rv1: ResultVal, rv2: ResultVal) result$min = rv1$min; else if ( rv2?$min ) result$min = rv2$min; - } \ No newline at end of file + } diff --git a/scripts/base/frameworks/sumstats/plugins/sample.bro b/scripts/base/frameworks/sumstats/plugins/sample.bro index 91a295775d..dc2f438c79 100644 --- a/scripts/base/frameworks/sumstats/plugins/sample.bro +++ b/scripts/base/frameworks/sumstats/plugins/sample.bro @@ -1,4 +1,4 @@ -@load base/frameworks/sumstats +@load base/frameworks/sumstats/main @load base/utils/queue module SumStats; @@ -10,10 +10,8 @@ export { }; redef record ResultVal += { - ## This is the queue where samples - ## are maintained. Use the - ## :bro:see:`SumStats::get_samples` function - ## to get a vector of the samples. + ## This is the queue where samples are maintained. Use the + ## :bro:see:`SumStats::get_samples` function to get a vector of the samples. samples: Queue::Queue &optional; }; @@ -48,4 +46,4 @@ hook compose_resultvals_hook(result: ResultVal, rv1: ResultVal, rv2: ResultVal) result$samples = rv1$samples; else if ( rv2?$samples ) result$samples = rv2$samples; - } \ No newline at end of file + } diff --git a/scripts/base/frameworks/sumstats/plugins/std-dev.bro b/scripts/base/frameworks/sumstats/plugins/std-dev.bro index cbe9197581..0f32e25a68 100644 --- a/scripts/base/frameworks/sumstats/plugins/std-dev.bro +++ b/scripts/base/frameworks/sumstats/plugins/std-dev.bro @@ -1,10 +1,10 @@ +@load base/frameworks/sumstats/main @load ./variance -@load base/frameworks/sumstats module SumStats; export { - redef enum Calculation += { + redef enum Calculation += { ## Find the standard deviation of the values. STD_DEV }; diff --git a/scripts/base/frameworks/sumstats/plugins/sum.bro b/scripts/base/frameworks/sumstats/plugins/sum.bro index 18056d14fb..db2246742b 100644 --- a/scripts/base/frameworks/sumstats/plugins/sum.bro +++ b/scripts/base/frameworks/sumstats/plugins/sum.bro @@ -1,9 +1,9 @@ -@load base/frameworks/sumstats +@load base/frameworks/sumstats/main module SumStats; export { - redef enum Calculation += { + redef enum Calculation += { ## Sums the values given. For string values, ## this will be the number of strings given. SUM }; @@ -48,4 +48,4 @@ hook compose_resultvals_hook(result: ResultVal, rv1: ResultVal, rv2: ResultVal) if ( rv2?$sum ) result$sum += rv2$sum; } - } \ No newline at end of file + } diff --git a/scripts/base/frameworks/sumstats/plugins/unique.bro b/scripts/base/frameworks/sumstats/plugins/unique.bro index f44da07e07..ef62caaffa 100644 --- a/scripts/base/frameworks/sumstats/plugins/unique.bro +++ b/scripts/base/frameworks/sumstats/plugins/unique.bro @@ -1,9 +1,9 @@ -@load base/frameworks/sumstats +@load base/frameworks/sumstats/main module SumStats; export { - redef enum Calculation += { + redef enum Calculation += { ## Calculate the number of unique values. UNIQUE }; @@ -16,8 +16,8 @@ export { } redef record ResultVal += { - # Internal use only. This is not meant to be publically available - # because we don't want to trust that we can inspect the values + # Internal use only. This is not meant to be publicly available + # because we don't want to trust that we can inspect the values # since we will likely move to a probabilistic data structure in the future.
# TODO: in the future this will optionally be a hyperloglog structure unique_vals: set[Observation] &optional; @@ -27,7 +27,7 @@ hook observe_hook(r: Reducer, val: double, obs: Observation, rv: ResultVal) { if ( UNIQUE in r$apply ) { - if ( ! rv?$unique_vals ) + if ( ! rv?$unique_vals ) rv$unique_vals=set(); add rv$unique_vals[obs]; rv$unique = |rv$unique_vals|; @@ -40,7 +40,7 @@ hook compose_resultvals_hook(result: ResultVal, rv1: ResultVal, rv2: ResultVal) { if ( rv1?$unique_vals ) result$unique_vals = rv1$unique_vals; - + if ( rv2?$unique_vals ) if ( ! result?$unique_vals ) result$unique_vals = rv2$unique_vals; @@ -50,4 +50,4 @@ hook compose_resultvals_hook(result: ResultVal, rv1: ResultVal, rv2: ResultVal) result$unique = |result$unique_vals|; } - } \ No newline at end of file + } diff --git a/scripts/base/frameworks/sumstats/plugins/variance.bro b/scripts/base/frameworks/sumstats/plugins/variance.bro index 9aadd58bdd..773c7d697c 100644 --- a/scripts/base/frameworks/sumstats/plugins/variance.bro +++ b/scripts/base/frameworks/sumstats/plugins/variance.bro @@ -1,10 +1,10 @@ +@load base/frameworks/sumstats/main @load ./average -@load base/frameworks/sumstats module SumStats; export { - redef enum Calculation += { + redef enum Calculation += { ## Find the variance of the values. VARIANCE }; @@ -66,4 +66,4 @@ hook compose_resultvals_hook(result: ResultVal, rv1: ResultVal, rv2: ResultVal) result$prev_avg = rv2$prev_avg; calc_variance(result); - } \ No newline at end of file + } diff --git a/scripts/base/protocols/ssh/main.bro b/scripts/base/protocols/ssh/main.bro index f4112efde0..d069486e67 100644 --- a/scripts/base/protocols/ssh/main.bro +++ b/scripts/base/protocols/ssh/main.bro @@ -1,7 +1,7 @@ -##! Base SSH analysis script. The heuristic to blindly determine success or +##! Base SSH analysis script. The heuristic to blindly determine success or ##! failure for SSH connections is implemented here. At this time, it only ##! uses the size of the data being returned from the server to make the -##! heuristic determination about success of the connection. +##! heuristic determination about success of the connection. ##! Requires that :bro:id:`use_conn_size_analyzer` is set to T! The heuristic ##! is not attempted if the connection size analyzer isn't enabled. @@ -17,7 +17,7 @@ module SSH; export { ## The SSH protocol logging stream identifier. redef enum Log::ID += { LOG }; - + type Info: record { ## Time when the SSH connection began. ts: time &log; @@ -26,9 +26,9 @@ export { ## The connection's 4-tuple of endpoint addresses/ports. id: conn_id &log; ## Indicates if the login was heuristically guessed to be "success", - ## "failure", or "undetermined". + ## "failure", or "undetermined". status: string &log &default="undetermined"; - ## Direction of the connection. If the client was a local host + ## Direction of the connection. If the client was a local host ## logging into an external host, this would be OUTBOUND. INBOUND ## would be set for the opposite situation. # TODO: handle local-local and remote-remote better. @@ -38,33 +38,33 @@ export { ## Software string from the server. server: string &log &optional; ## Amount of data returned from the server. This is currently - ## the only measure of the success heuristic and it is logged to + ## the only measure of the success heuristic and it is logged to ## assist analysts looking at the logs to make their own determination ## about the success on a case-by-case basis. 
resp_size: count &log &default=0; - + ## Indicate if the SSH session is done being watched. done: bool &default=F; }; - - ## The size in bytes of data sent by the server at which the SSH + + ## The size in bytes of data sent by the server at which the SSH ## connection is presumed to be successful. const authentication_data_size = 4000 &redef; - + ## If true, we tell the event engine to not look at further data ## packets after the initial SSH handshake. Helps with performance ## (especially with large file transfers) but precludes some ## kinds of analyses. const skip_processing_after_detection = F &redef; - + ## Event that is generated when the heuristic thinks that a login ## was successful. global heuristic_successful_login: event(c: connection); - + ## Event that is generated when the heuristic thinks that a login ## failed. global heuristic_failed_login: event(c: connection); - + ## Event that can be handled to access the :bro:type:`SSH::Info` ## record as it is sent on to the logging framework. global log_ssh: event(rec: Info); @@ -102,21 +102,21 @@ function check_ssh_connection(c: connection, done: bool) # If already done watching this connection, just return. if ( c$ssh$done ) return; - + if ( done ) { - # If this connection is done, then we can look to see if + # If this connection is done, then we can look to see if # this matches the conditions for a failed login. Failed # logins are only detected at connection state removal. - if ( # Require originators to have sent at least 50 bytes. + if ( # Require originators to have sent at least 50 bytes. c$orig$size > 50 && # Responders must be below 4000 bytes. - c$resp$size < 4000 && + c$resp$size < 4000 && # Responder must have sent fewer than 40 packets. c$resp$num_pkts < 40 && # If there was a content gap we can't reliably do this heuristic. - c?$conn && c$conn$missed_bytes == 0)# && + c?$conn && c$conn$missed_bytes == 0)# && # Only "normal" connections can count. #c$conn?$conn_state && c$conn$conn_state in valid_states ) { @@ -147,13 +147,13 @@ function check_ssh_connection(c: connection, done: bool) # Set the direction for the log. c$ssh$direction = Site::is_local_addr(c$id$orig_h) ? OUTBOUND : INBOUND; - + # Set the "done" flag to prevent the watching event from rescheduling # after detection is done. c$ssh$done=T; Log::write(SSH::LOG, c$ssh); - + if ( skip_processing_after_detection ) { # Stop watching this connection, we don't care about it anymore. @@ -186,12 +186,12 @@ event ssh_server_version(c: connection, version: string) &priority=5 set_session(c); c$ssh$server = version; } - + event ssh_client_version(c: connection, version: string) &priority=5 { set_session(c); c$ssh$client = version; - + # The heuristic detection for SSH relies on the ConnSize analyzer. # Don't do the heuristics if it's disabled. if ( use_conn_size_analyzer ) diff --git a/scripts/base/utils/queue.bro b/scripts/base/utils/queue.bro index ed45b034f5..11e85f229d 100644 --- a/scripts/base/utils/queue.bro +++ b/scripts/base/utils/queue.bro @@ -6,7 +6,7 @@ export { ## Settings for initializing the queue. type Settings: record { ## If a maximum length is set for the queue - ## it will maintain itself at that + ## it will maintain itself at that ## maximum length automatically. max_len: count &optional; }; @@ -15,17 +15,17 @@ export { type Queue: record {}; ## Initialize a queue record structure. - ## + ## ## s: A :bro:record:`Settings` record configuring the queue. ## ## Returns: An opaque queue record. 
global init: function(s: Settings): Queue; ## Put a string onto the beginning of a queue. - ## + ## ## q: The queue to put the value into. - ## - ## val: The value to insert into the queue. + ## + ## val: The value to insert into the queue. global put: function(q: Queue, val: any); ## Get a string from the end of a queue. @@ -35,29 +35,29 @@ ## Returns: The value gotten from the queue. global get: function(q: Queue): any; - ## Merge two queue's together. If any settings are applied + ## Merge two queues together. If any settings are applied ## to the queues, the settings from q1 are used for the new ## merged queue. - ## + ## ## q1: The first queue. Settings are taken from here. ## ## q2: The second queue. - ## + ## ## Returns: A new queue from merging the other two together. global merge: function(q1: Queue, q2: Queue): Queue; ## Get the number of items in a queue. - ## + ## ## q: The queue. ## ## Returns: The length of the queue. global len: function(q: Queue): count; - + ## Get the contents of the queue as a vector. - ## + ## ## q: The queue. ## - ## ret: A vector containing the + ## ret: A vector containing the ## current contents of q as the type of ret. global get_vector: function(q: Queue, ret: vector of any); @@ -130,7 +130,7 @@ function get_vector(q: Queue, ret: vector of any) local i = q$bottom; local j = 0; # Really dumb hack, this is only to provide - # the iteration for the correct number of + # the iteration for the correct number of # values in q$vals. for ( ignored_val in q$vals ) { diff --git a/scripts/base/utils/time.bro b/scripts/base/utils/time.bro index abae46c144..2e3788e681 100644 --- a/scripts/base/utils/time.bro +++ b/scripts/base/utils/time.bro @@ -1,6 +1,6 @@ ## Given an interval, returns a string of the form 3m34s to -## give a minimalized human readable string for the minutes +## give a minimal human-readable string for the minutes ## and seconds represented by the interval.
function duration_to_mins_secs(dur: interval): string { diff --git a/scripts/policy/misc/app-metrics.bro b/scripts/policy/misc/app-metrics.bro index ec2e8f8d48..3df38ad8ad 100644 --- a/scripts/policy/misc/app-metrics.bro +++ b/scripts/policy/misc/app-metrics.bro @@ -36,9 +36,9 @@ event bro_init() &priority=3 local r1: SumStats::Reducer = [$stream="apps.bytes", $apply=set(SumStats::SUM)]; local r2: SumStats::Reducer = [$stream="apps.hits", $apply=set(SumStats::UNIQUE)]; - SumStats::create([$epoch=break_interval, + SumStats::create([$epoch=break_interval, $reducers=set(r1, r2), - $epoch_finished(data: SumStats::ResultTable) = + $epoch_finished(data: SumStats::ResultTable) = { local l: Info; l$ts = network_time(); @@ -67,12 +67,12 @@ function add_sumstats(id: conn_id, hostname: string, size: count) SumStats::observe("apps.bytes", [$str="facebook"], [$num=size]); SumStats::observe("apps.hits", [$str="facebook"], [$str=cat(id$orig_h)]); } - else if ( /\.google\.com$/ in hostname && size > 20 ) + else if ( /\.google\.com$/ in hostname && size > 20 ) { SumStats::observe("apps.bytes", [$str="google"], [$num=size]); SumStats::observe("apps.hits", [$str="google"], [$str=cat(id$orig_h)]); } - else if ( /\.nflximg\.com$/ in hostname && size > 200*1024 ) + else if ( /\.nflximg\.com$/ in hostname && size > 200*1024 ) { SumStats::observe("apps.bytes", [$str="netflix"], [$num=size]); SumStats::observe("apps.hits", [$str="netflix"], [$str=cat(id$orig_h)]); diff --git a/scripts/policy/misc/detect-traceroute/main.bro b/scripts/policy/misc/detect-traceroute/main.bro index 9ac0f5c2f9..c194d03e13 100644 --- a/scripts/policy/misc/detect-traceroute/main.bro +++ b/scripts/policy/misc/detect-traceroute/main.bro @@ -1,7 +1,7 @@ -##! This script detects large number of ICMP Time Exceeded messages heading -##! toward hosts that have sent low TTL packets. -##! It generates a notice when the number of ICMP Time Exceeded -##! messages for a source-destination pair exceeds threshold +##! This script detects a large number of ICMP Time Exceeded messages heading toward +##! hosts that have sent low TTL packets. It generates a notice when the number of +##! ICMP Time Exceeded messages for a source-destination pair exceeds a +##! threshold. @load base/frameworks/sumstats @load base/frameworks/signatures @load-sigs ./detect-low-ttls.sig @@ -22,10 +22,10 @@ export { ## By default this script requires that any host detected running traceroutes ## first send low TTL packets (TTL < 10) to the traceroute destination host. - ## Changing this this setting to `F` will relax the detection a bit by + ## Changing this setting to `F` will relax the detection a bit by ## solely relying on ICMP time-exceeded messages to detect traceroute. const require_low_ttl_packets = T &redef; - ## Defines the threshold for ICMP Time Exceeded messages for a src-dst pair. ## This threshold only comes into play after a host is found to be ## sending low ttl packets. @@ -39,11 +39,13 @@ export { ## The log record for the traceroute log. type Info: record { ## Timestamp - ts: time &log; + ts: time &log; ## Address initiating the traceroute. - src: addr &log; + src: addr &log; ## Destination address of the traceroute. - dst: addr &log; + dst: addr &log; + ## Protocol used for the traceroute.
+ proto: string &log; }; global log_traceroute: event(rec: Traceroute::Info); @@ -59,7 +61,7 @@ event bro_init() &priority=5 $reducers=set(r1, r2), $threshold_val(key: SumStats::Key, result: SumStats::Result) = { - # Give a threshold value of zero depending on if the host + # Give a threshold value of zero depending on if the host # sends a low ttl packet. if ( require_low_ttl_packets && result["traceroute.low_ttl_packet"]$sum == 0 ) return 0; @@ -69,14 +71,15 @@ event bro_init() &priority=5 $threshold=icmp_time_exceeded_threshold, $threshold_crossed(key: SumStats::Key, result: SumStats::Result) = { - local parts = split1(key$str, /-/); + local parts = split_n(key$str, /-/, F, 2); local src = to_addr(parts[1]); local dst = to_addr(parts[2]); - Log::write(LOG, [$ts=network_time(), $src=src, $dst=dst]); + local proto = parts[3]; + Log::write(LOG, [$ts=network_time(), $src=src, $dst=dst, $proto=proto]); NOTICE([$note=Traceroute::Detected, - $msg=fmt("%s seems to be running traceroute", src), - $src=src, $dst=dst, - $identifier=cat(src)]); + $msg=fmt("%s seems to be running traceroute using %s", src, proto), + $src=src, + $identifier=cat(src,proto)]); }]); } @@ -84,10 +87,12 @@ event bro_init() &priority=5 event signature_match(state: signature_state, msg: string, data: string) { if ( state$sig_id == /traceroute-detector.*/ ) - SumStats::observe("traceroute.low_ttl_packet", [$str=cat(state$conn$id$orig_h,"-",state$conn$id$resp_h)], [$num=1]); + { + SumStats::observe("traceroute.low_ttl_packet", [$str=cat(state$conn$id$orig_h,"-",state$conn$id$resp_h,"-",get_port_transport_proto(state$conn$id$resp_p))], [$num=1]); + } } event icmp_time_exceeded(c: connection, icmp: icmp_conn, code: count, context: icmp_context) { - SumStats::observe("traceroute.time_exceeded", [$str=cat(context$id$orig_h,"-",context$id$resp_h)], [$str=cat(c$id$orig_h)]); + SumStats::observe("traceroute.time_exceeded", [$str=cat(context$id$orig_h,"-",context$id$resp_h,"-",get_port_transport_proto(context$id$resp_p))], [$str=cat(c$id$orig_h)]); } diff --git a/scripts/policy/misc/scan.bro b/scripts/policy/misc/scan.bro index 508e9316a8..f3dcaf2291 100644 --- a/scripts/policy/misc/scan.bro +++ b/scripts/policy/misc/scan.bro @@ -13,36 +13,39 @@ module Scan; export { redef enum Notice::Type += { - ## Address scans detect that a host appears to be scanning some number - ## of hosts on a single port. This notice is generated when more than - ## :bro:id:`addr_scan_threshold` unique hosts are seen over the - ## previous :bro:id:`addr_scan_interval` time range. + ## Address scans detect that a host appears to be scanning some number of + ## destinations on a single port. This notice is generated when more than + ## :bro:id:`addr_scan_threshold` unique hosts are seen over the previous + ## :bro:id:`addr_scan_interval` time range. Address_Scan, - ## Port scans detect that an attacking host appears to be scanning a - ## single victim host on several ports. This notice is generated when - ## an attacking host attempts to connect to :bro:id:`port_scan_threshold` - ## unique ports on a single host over the previous + + ## Port scans detect that an attacking host appears to be scanning a + ## single victim host on several ports. This notice is generated when + ## an attacking host attempts to connect to :bro:id:`port_scan_threshold` + ## unique ports on a single host over the previous ## :bro:id:`port_scan_interval` time range. Port_Scan, }; - ## Failed connection attempts are tracked over this time interval for the address - ## scan detection. 
A higher interval will detect slower scanners, but may - ## also yield more false positives. + ## Failed connection attempts are tracked over this time interval for the address + ## scan detection. A higher interval will detect slower scanners, but may also + ## yield more false positives. const addr_scan_interval = 5min &redef; - ## Failed connection attempts are tracked over this time interval for the port - ## scan detection. A higher interval will detect slower scanners, but may - ## also yield more false positives. + + ## Failed connection attempts are tracked over this time interval for the port scan + ## detection. A higher interval will detect slower scanners, but may also yield + ## more false positives. const port_scan_interval = 5min &redef; - ## The threshold of a unique number of hosts a scanning host has to have failed + ## The threshold of a unique number of hosts a scanning host has to have failed ## connections with on a single port. const addr_scan_threshold = 25 &redef; + ## The threshold of a number of unique ports a scanning host has to have failed ## connections with on a single victim host. const port_scan_threshold = 15 &redef; - ## Custom thresholds based on service for address scan. This is primarily + ## Custom thresholds based on service for address scan. This is primarily ## useful for setting reduced thresholds for specific ports. const addr_scan_custom_thresholds: table[port] of count &redef; @@ -73,14 +76,14 @@ event bro_init() &priority=5 $sub=side, $msg=message, $identifier=cat(key$host)]); - }]); + }]); # Note: port scans are tracked similar to: table[src_ip, dst_ip] of set(port); local r2: SumStats::Reducer = [$stream="scan.port.fail", $apply=set(SumStats::UNIQUE)]; SumStats::create([$epoch=port_scan_interval, $reducers=set(r2), $threshold_val(key: SumStats::Key, result: SumStats::Result) = - { + { return double_to_count(result["scan.port.fail"]$unique); }, $threshold=port_scan_threshold, @@ -90,13 +93,13 @@ event bro_init() &priority=5 local side = Site::is_local_addr(key$host) ? "local" : "remote"; local dur = duration_to_mins_secs(r$end-r$begin); local message = fmt("%s scanned at least %d unique ports of host %s in %s", key$host, r$unique, key$str, dur); - NOTICE([$note=Port_Scan, + NOTICE([$note=Port_Scan, $src=key$host, $dst=to_addr(key$str), $sub=side, $msg=message, $identifier=cat(key$host)]); - }]); + }]); } function add_sumstats(id: conn_id, reverse: bool) @@ -111,7 +114,7 @@ function add_sumstats(id: conn_id, reverse: bool) victim = id$orig_h; scanned_port = id$orig_p; } - + if ( hook Scan::addr_scan_policy(scanner, victim, scanned_port) ) SumStats::observe("scan.addr.fail", [$host=scanner, $str=cat(scanned_port)], [$str=cat(victim)]); @@ -121,7 +124,7 @@ function add_sumstats(id: conn_id, reverse: bool) function is_failed_conn(c: connection): bool { - # Sr || ( (hR || ShR) && (data not sent in any direction) ) + # Sr || ( (hR || ShR) && (data not sent in any direction) ) if ( (c$orig$state == TCP_SYN_SENT && c$resp$state == TCP_RESET) || (((c$orig$state == TCP_RESET && c$resp$state == TCP_SYN_ACK_SENT) || (c$orig$state == TCP_RESET && c$resp$state == TCP_ESTABLISHED && "S" in c$history ) @@ -134,7 +137,7 @@ function is_failed_conn(c: connection): bool function is_reverse_failed_conn(c: connection): bool { # reverse scan i.e. 
conn dest is the scanner - # sR || ( (Hr || sHr) && (data not sent in any direction) ) + # sR || ( (Hr || sHr) && (data not sent in any direction) ) if ( (c$resp$state == TCP_SYN_SENT && c$orig$state == TCP_RESET) || (((c$resp$state == TCP_RESET && c$orig$state == TCP_SYN_ACK_SENT) || (c$resp$state == TCP_RESET && c$orig$state == TCP_ESTABLISHED && "s" in c$history ) @@ -144,37 +147,34 @@ function is_reverse_failed_conn(c: connection) return F; } -## Generated for an unsuccessful connection attempt. This -## event is raised when an originator unsuccessfully attempted -## to establish a connection. “Unsuccessful” is defined as at least -## tcp_attempt_delay seconds having elapsed since the originator -## first sent a connection establishment packet to the destination -## without seeing a reply. +## Generated for an unsuccessful connection attempt. This +## event is raised when an originator unsuccessfully attempted +## to establish a connection. “Unsuccessful” is defined as at least +## tcp_attempt_delay seconds having elapsed since the originator first sent a +## connection establishment packet to the destination without seeing a reply. event connection_attempt(c: connection) { local is_reverse_scan = F; if ( "H" in c$history ) is_reverse_scan = T; - + add_sumstats(c$id, is_reverse_scan); } -## Generated for a rejected TCP connection. This event -## is raised when an originator attempted to setup a TCP -## connection but the responder replied with a RST packet +## Generated for a rejected TCP connection. This event is raised when an originator +## attempted to set up a TCP connection but the responder replied with a RST packet ## denying it. event connection_rejected(c: connection) { local is_reverse_scan = F; if ( "s" in c$history ) is_reverse_scan = T; - + add_sumstats(c$id, is_reverse_scan); } -## Generated when an endpoint aborted a TCP connection. -## The event is raised when one endpoint of an *established* -## TCP connection aborted by sending a RST packet. +## Generated when an endpoint aborted a TCP connection. The event is raised when +## one endpoint aborted an *established* TCP connection by sending a RST packet. event connection_reset(c: connection) { if ( is_failed_conn(c) ) diff --git a/scripts/policy/protocols/ftp/detect-bruteforcing.bro b/scripts/policy/protocols/ftp/detect-bruteforcing.bro index e6c44ddb64..21c9c403c7 100644 --- a/scripts/policy/protocols/ftp/detect-bruteforcing.bro +++ b/scripts/policy/protocols/ftp/detect-bruteforcing.bro @@ -1,3 +1,5 @@ +##! FTP brute-forcing detector, triggering when too many rejected usernames or +##! failed passwords have occurred from a single address. @load base/protocols/ftp @load base/frameworks/sumstats @@ -7,13 +9,13 @@ module FTP; export { - redef enum Notice::Type += { + redef enum Notice::Type += { ## Indicates a host bruteforcing FTP logins by watching for too many ## rejected usernames or failed passwords. Bruteforcing }; - ## How many rejected usernames or passwords are required before being + ## How many rejected usernames or passwords are required before being ## considered to be bruteforcing.
const bruteforce_threshold = 20 &redef; @@ -29,17 +31,17 @@ event bro_init() SumStats::create([$epoch=bruteforce_measurement_interval, $reducers=set(r1), $threshold_val(key: SumStats::Key, result: SumStats::Result) = - { + { return result["ftp.failed_auth"]$num; }, $threshold=bruteforce_threshold, - $threshold_crossed(key: SumStats::Key, result: SumStats::Result) = + $threshold_crossed(key: SumStats::Key, result: SumStats::Result) = { local r = result["ftp.failed_auth"]; local dur = duration_to_mins_secs(r$end-r$begin); local plural = r$unique>1 ? "s" : ""; local message = fmt("%s had %d failed logins on %d FTP server%s in %s", key$host, r$num, r$unique, plural, dur); - NOTICE([$note=FTP::Bruteforcing, + NOTICE([$note=FTP::Bruteforcing, $src=key$host, $msg=message, $identifier=cat(key$host)]); @@ -54,4 +56,4 @@ event ftp_reply(c: connection, code: count, msg: string, cont_resp: bool) if ( FTP::parse_ftp_reply_code(code)$x == 5 ) SumStats::observe("ftp.failed_auth", [$host=c$id$orig_h], [$str=cat(c$id$resp_h)]); } - } \ No newline at end of file + } diff --git a/scripts/policy/protocols/http/detect-sqli.bro b/scripts/policy/protocols/http/detect-sqli.bro index daec0b0fb0..11dba0dc46 100644 --- a/scripts/policy/protocols/http/detect-sqli.bro +++ b/scripts/policy/protocols/http/detect-sqli.bro @@ -14,22 +14,22 @@ export { ## it. This is tracked by IP address as opposed to hostname. SQL_Injection_Victim, }; - + redef enum Tags += { ## Indicator of a URI based SQL injection attack. URI_SQLI, - ## Indicator of client body based SQL injection attack. This is + ## Indicator of client body based SQL injection attack. This is ## typically the body content of a POST request. Not implemented yet. POST_SQLI, ## Indicator of a cookie based SQL injection attack. Not implemented yet. COOKIE_SQLI, }; - + ## Defines the threshold that determines if an SQL injection attack - ## is ongoing based on the number of requests that appear to be SQL + ## is ongoing based on the number of requests that appear to be SQL ## injection attacks. const sqli_requests_threshold = 50 &redef; - + ## Interval at which to watch for the ## :bro:id:`HTTP::sqli_requests_threshold` variable to be crossed. ## At the end of each interval the counter is reset. @@ -41,7 +41,7 @@ export { const collect_SQLi_samples = 5 &redef; ## Regular expression is used to match URI based SQL injections. 
- const match_sql_injection_uri = + const match_sql_injection_uri = /[\?&][^[:blank:]\x00-\x37\|]+?=[\-[:alnum:]%]+([[:blank:]\x00-\x37]|\/\*.*?\*\/)*['"]?([[:blank:]\x00-\x37]|\/\*.*?\*\/|\)?;)+.*?([hH][aA][vV][iI][nN][gG]|[uU][nN][iI][oO][nN]|[eE][xX][eE][cC]|[sS][eE][lL][eE][cC][tT]|[dD][eE][lL][eE][tT][eE]|[dD][rR][oO][pP]|[dD][eE][cC][lL][aA][rR][eE]|[cC][rR][eE][aA][tT][eE]|[iI][nN][sS][eE][rR][tT])([[:blank:]\x00-\x37]|\/\*.*?\*\/)+/ | /[\?&][^[:blank:]\x00-\x37\|]+?=[\-0-9%]+([[:blank:]\x00-\x37]|\/\*.*?\*\/)*['"]?([[:blank:]\x00-\x37]|\/\*.*?\*\/|\)?;)+([xX]?[oO][rR]|[nN]?[aA][nN][dD])([[:blank:]\x00-\x37]|\/\*.*?\*\/)+['"]?(([^a-zA-Z&]+)?=|[eE][xX][iI][sS][tT][sS])/ | /[\?&][^[:blank:]\x00-\x37]+?=[\-0-9%]*([[:blank:]\x00-\x37]|\/\*.*?\*\/)*['"]([[:blank:]\x00-\x37]|\/\*.*?\*\/)*(-|=|\+|\|\|)([[:blank:]\x00-\x37]|\/\*.*?\*\/)*([0-9]|\(?[cC][oO][nN][vV][eE][rR][tT]|[cC][aA][sS][tT])/ @@ -60,18 +60,18 @@ function format_sqli_samples(samples: vector of SumStats::Observation): string event bro_init() &priority=3 { - # Add filters to the metrics so that the metrics framework knows how to + # Add filters to the metrics so that the metrics framework knows how to # determine when it looks like an actual attack and how to respond when # thresholds are crossed. local r1: SumStats::Reducer = [$stream="http.sqli.attacker", $apply=set(SumStats::SUM), $samples=collect_SQLi_samples]; SumStats::create([$epoch=sqli_requests_interval, $reducers=set(r1), $threshold_val(key: SumStats::Key, result: SumStats::Result) = - { + { return double_to_count(result["http.sqli.attacker"]$sum); }, $threshold=sqli_requests_threshold, - $threshold_crossed(key: SumStats::Key, result: SumStats::Result) = + $threshold_crossed(key: SumStats::Key, result: SumStats::Result) = { local r = result["http.sqli.attacker"]; NOTICE([$note=SQL_Injection_Attacker, @@ -85,11 +85,11 @@ event bro_init() &priority=3 SumStats::create([$epoch=sqli_requests_interval, $reducers=set(r2), $threshold_val(key: SumStats::Key, result: SumStats::Result) = - { + { return double_to_count(result["http.sqli.victim"]$sum); }, $threshold=sqli_requests_threshold, - $threshold_crossed(key: SumStats::Key, result: SumStats::Result) = + $threshold_crossed(key: SumStats::Key, result: SumStats::Result) = { local r = result["http.sqli.victim"]; NOTICE([$note=SQL_Injection_Victim, @@ -106,7 +106,7 @@ event http_request(c: connection, method: string, original_URI: string, if ( match_sql_injection_uri in unescaped_URI ) { add c$http$tags[URI_SQLI]; - + SumStats::observe("http.sqli.attacker", [$host=c$id$orig_h], [$str=original_URI]); SumStats::observe("http.sqli.victim", [$host=c$id$resp_h], [$str=original_URI]); } diff --git a/scripts/policy/protocols/ssh/detect-bruteforcing.bro b/scripts/policy/protocols/ssh/detect-bruteforcing.bro index 82c0bb0f08..309905e939 100644 --- a/scripts/policy/protocols/ssh/detect-bruteforcing.bro +++ b/scripts/policy/protocols/ssh/detect-bruteforcing.bro @@ -10,7 +10,7 @@ module SSH; export { redef enum Notice::Type += { - ## Indicates that a host has been identified as crossing the + ## Indicates that a host has been identified as crossing the ## :bro:id:`SSH::password_guesses_limit` threshold with heuristically ## determined failed logins. Password_Guessing, @@ -24,7 +24,7 @@ export { ## An indicator of the login for the intel framework. SSH::SUCCESSFUL_LOGIN, }; - + ## The number of failed SSH connections before a host is designated as ## guessing passwords. 
diff --git a/scripts/policy/protocols/ssh/detect-bruteforcing.bro b/scripts/policy/protocols/ssh/detect-bruteforcing.bro
index 82c0bb0f08..309905e939 100644
--- a/scripts/policy/protocols/ssh/detect-bruteforcing.bro
+++ b/scripts/policy/protocols/ssh/detect-bruteforcing.bro
@@ -10,7 +10,7 @@ module SSH;

 export {
 	redef enum Notice::Type += {
-		## Indicates that a host has been identified as crossing the 
+		## Indicates that a host has been identified as crossing the
 		## :bro:id:`SSH::password_guesses_limit` threshold with heuristically
 		## determined failed logins.
 		Password_Guessing,
@@ -24,7 +24,7 @@ export {
 		## An indicator of the login for the intel framework.
 		SSH::SUCCESSFUL_LOGIN,
 	};
-	
+
 	## The number of failed SSH connections before a host is designated as
 	## guessing passwords.
 	const password_guesses_limit = 30 &redef;
@@ -33,9 +33,9 @@ export {
 	## model of a password guesser.
 	const guessing_timeout = 30 mins &redef;

-	## This value can be used to exclude hosts or entire networks from being 
+	## This value can be used to exclude hosts or entire networks from being
 	## tracked as potential "guessers". There are cases where the success
-	## heuristic fails and this acts as the whitelist. The index represents 
+	## heuristic fails and this acts as the whitelist. The index represents
 	## client subnets and the yield value represents server subnets.
 	const ignore_guessers: table[subnet] of subnet &redef;
 }
@@ -46,21 +46,21 @@ event bro_init()
 	SumStats::create([$epoch=guessing_timeout,
 	                  $reducers=set(r1),
 	                  $threshold_val(key: SumStats::Key, result: SumStats::Result) =
-	                  	{ 
+	                  	{
 	                  	return double_to_count(result["ssh.login.failure"]$sum);
 	                  	},
 	                  $threshold=password_guesses_limit,
-	                  $threshold_crossed(key: SumStats::Key, result: SumStats::Result) = 
+	                  $threshold_crossed(key: SumStats::Key, result: SumStats::Result) =
 	                  	{
 	                  	local r = result["ssh.login.failure"];
 	                  	# Generate the notice.
-	                  	NOTICE([$note=Password_Guessing, 
+	                  	NOTICE([$note=Password_Guessing,
 	                  	        $msg=fmt("%s appears to be guessing SSH passwords (seen in %d connections).", key$host, r$num),
 	                  	        $src=key$host,
 	                  	        $identifier=cat(key$host)]);
 	                  	# Insert the guesser into the intel framework.
 	                  	Intel::insert([$host=key$host,
-	                  	               $meta=[$source="local", 
+	                  	               $meta=[$source="local",
 	                  	                      $desc=fmt("Bro observed %d apparently failed SSH connections.", r$num)]]);
 	                  	}]);
 	}
@@ -68,7 +68,7 @@ event bro_init()
 event SSH::heuristic_successful_login(c: connection)
 	{
 	local id = c$id;
-	
+
 	Intel::seen([$host=id$orig_h,
 	             $conn=c,
 	             $where=SSH::SUCCESSFUL_LOGIN]);
@@ -77,8 +77,8 @@ event SSH::heuristic_successful_login(c: connection)
 event SSH::heuristic_failed_login(c: connection)
 	{
 	local id = c$id;
-	
-	# Add data to the FAILED_LOGIN metric unless this connection should 
+
+	# Add data to the FAILED_LOGIN metric unless this connection should
 	# be ignored.
 	if ( ! (id$orig_h in ignore_guessers &&
 	        id$resp_h in ignore_guessers[id$orig_h]) )
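A usage sketch for the whitelist above, with hypothetical subnets: the table index is the client subnet and the yield is the server subnet whose failed logins it may accumulate without being flagged:

    redef SSH::ignore_guessers += {
        # Hypothetical: hosts in 10.10.0.0/16 may fail against the lab
        # network 192.168.100.0/24 without triggering Password_Guessing.
        [10.10.0.0/16] = 192.168.100.0/24,
    };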
diff --git a/src/NetVar.cc b/src/NetVar.cc
index 248ae15e1a..012e4a85bc 100644
--- a/src/NetVar.cc
+++ b/src/NetVar.cc
@@ -239,6 +239,11 @@ TableType* record_field_table;

 StringVal* cmd_line_bpf_filter;

+OpaqueType* md5_type;
+OpaqueType* sha1_type;
+OpaqueType* sha256_type;
+OpaqueType* entropy_type;
+
 #include "const.bif.netvar_def"
 #include "types.bif.netvar_def"
 #include "event.bif.netvar_def"
@@ -298,6 +303,11 @@ void init_general_global_var()
 	cmd_line_bpf_filter =
 		internal_val("cmd_line_bpf_filter")->AsStringVal();
+
+	md5_type = new OpaqueType("md5");
+	sha1_type = new OpaqueType("sha1");
+	sha256_type = new OpaqueType("sha256");
+	entropy_type = new OpaqueType("entropy");
 	}

 void init_net_var()
@@ -346,7 +356,7 @@ void init_net_var()
 		opt_internal_int("tcp_excessive_data_without_further_acks");

 	x509_type = internal_type("X509")->AsRecordType();
-	
+
 	socks_address = internal_type("SOCKS::Address")->AsRecordType();

 	non_analyzed_lifetime = opt_internal_double("non_analyzed_lifetime");
diff --git a/src/NetVar.h b/src/NetVar.h
index 2561fa0ad9..d7590b20e7 100644
--- a/src/NetVar.h
+++ b/src/NetVar.h
@@ -243,6 +243,12 @@ extern TableType* record_field_table;

 extern StringVal* cmd_line_bpf_filter;

+class OpaqueType;
+extern OpaqueType* md5_type;
+extern OpaqueType* sha1_type;
+extern OpaqueType* sha256_type;
+extern OpaqueType* entropy_type;
+
 // Initializes globals that don't pertain to network/event analysis.
 extern void init_general_global_var();
diff --git a/src/OpaqueVal.cc b/src/OpaqueVal.cc
index 54c771b366..39eab973b7 100644
--- a/src/OpaqueVal.cc
+++ b/src/OpaqueVal.cc
@@ -1,4 +1,5 @@
 #include "OpaqueVal.h"
+#include "NetVar.h"
 #include "Reporter.h"
 #include "Serializer.h"
 #include "HyperLogLog.h"
@@ -144,6 +145,10 @@ bool HashVal::DoUnserialize(UnserialInfo* info)
 	return UNSERIALIZE(&valid);
 	}

+MD5Val::MD5Val() : HashVal(md5_type)
+	{
+	}
+
 void MD5Val::digest(val_list& vlist, u_char result[MD5_DIGEST_LENGTH])
 	{
 	MD5_CTX h;
@@ -261,6 +266,10 @@ bool MD5Val::DoUnserialize(UnserialInfo* info)
 	return true;
 	}

+SHA1Val::SHA1Val() : HashVal(sha1_type)
+	{
+	}
+
 void SHA1Val::digest(val_list& vlist, u_char result[SHA_DIGEST_LENGTH])
 	{
 	SHA_CTX h;
@@ -369,6 +378,10 @@ bool SHA1Val::DoUnserialize(UnserialInfo* info)
 	return true;
 	}

+SHA256Val::SHA256Val() : HashVal(sha256_type)
+	{
+	}
+
 void SHA256Val::digest(val_list& vlist, u_char result[SHA256_DIGEST_LENGTH])
 	{
 	SHA256_CTX h;
@@ -482,6 +495,9 @@ bool SHA256Val::DoUnserialize(UnserialInfo* info)
 	return true;
 	}

+EntropyVal::EntropyVal() : OpaqueVal(entropy_type)
+	{
+	}

 bool EntropyVal::Feed(const void* data, size_t size)
 	{
diff --git a/src/OpaqueVal.h b/src/OpaqueVal.h
index dd70eaf96b..d0373db1d2 100644
--- a/src/OpaqueVal.h
+++ b/src/OpaqueVal.h
@@ -54,7 +54,7 @@ public:
 	                 u_char key[MD5_DIGEST_LENGTH],
 	                 u_char result[MD5_DIGEST_LENGTH]);

-	MD5Val() : HashVal(new OpaqueType("md5")) { }
+	MD5Val();

 protected:
 	friend class Val;
@@ -73,7 +73,7 @@ class SHA1Val : public HashVal {
 public:
 	static void digest(val_list& vlist, u_char result[SHA_DIGEST_LENGTH]);

-	SHA1Val() : HashVal(new OpaqueType("sha1")) { }
+	SHA1Val();

 protected:
 	friend class Val;
@@ -92,7 +92,7 @@ class SHA256Val : public HashVal {
 public:
 	static void digest(val_list& vlist, u_char result[SHA256_DIGEST_LENGTH]);

-	SHA256Val() : HashVal(new OpaqueType("sha256")) { }
+	SHA256Val();

 protected:
 	friend class Val;
@@ -109,7 +109,7 @@ private:

 class EntropyVal : public OpaqueVal {
 public:
-	EntropyVal() : OpaqueVal(new OpaqueType("entropy")) { }
+	EntropyVal();

 	bool Feed(const void* data, size_t size);
 	bool Get(double *r_ent, double *r_chisq, double *r_mean,
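Script-level behavior of these opaque hash values is unchanged by sharing one OpaqueType per kind instead of allocating a fresh one per instance; incremental hashing works as before. A minimal sketch using the existing MD5 BiFs:

    event bro_init()
        {
        # Feed an "opaque of md5" handle piecewise ...
        local h = md5_hash_init();
        md5_hash_update(h, "foo");
        md5_hash_update(h, "bar");
        # ... which should print the same digest twice.
        print md5_hash_finish(h);
        print md5_hash("foobar");
        }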
diff --git a/src/TCP.cc b/src/TCP.cc
index da977d8157..c291f8e76c 100644
--- a/src/TCP.cc
+++ b/src/TCP.cc
@@ -566,7 +566,7 @@ void TCP_Analyzer::UpdateInactiveState(double t,
 			else
 				endpoint->SetState(TCP_ENDPOINT_SYN_SENT);

-			if ( connection_attempt )
+			if ( tcp_attempt_delay )
 				ADD_ANALYZER_TIMER(&TCP_Analyzer::AttemptTimer,
 					t + tcp_attempt_delay, 1,
 					TIMER_TCP_ATTEMPT);
@@ -1497,24 +1497,7 @@ void TCP_Analyzer::ExpireTimer(double t)

 	if ( resp->state == TCP_ENDPOINT_INACTIVE )
 		{
-		if ( (orig->state == TCP_ENDPOINT_SYN_SENT ||
-		      orig->state == TCP_ENDPOINT_SYN_ACK_SENT) )
-			{
-			if ( ! connection_attempt )
-				{
-				// Time out the connection attempt,
-				// since the AttemptTimer isn't going
-				// to do it for us, and we don't want
-				// to clog the data structures with
-				// old, failed attempts.
-				Event(connection_timeout);
-				is_active = 0;
-				sessions->Remove(Conn());
-				return;
-				}
-			}
-
-		else if ( orig->state == TCP_ENDPOINT_INACTIVE )
+		if ( orig->state == TCP_ENDPOINT_INACTIVE )
 			{
 			// Nothing ever happened on this connection.
 			// This can occur when we see a trashed
diff --git a/src/Val.cc b/src/Val.cc
index dd86e71a9e..33b2d0eacd 100644
--- a/src/Val.cc
+++ b/src/Val.cc
@@ -1749,7 +1749,7 @@ Val* TableVal::Default(Val* index)

 	if ( def_val->Type()->Tag() != TYPE_FUNC ||
 	     same_type(def_val->Type(), Type()->YieldType()) )
-		return def_val->Ref();
+		return def_attr->AttrExpr()->IsConst() ? def_val->Ref() : def_val->Clone();

 	const Func* f = def_val->AsFunc();
 	val_list* vl = new val_list();
diff --git a/testing/btest/Baseline/language.table-default-record/out b/testing/btest/Baseline/language.table-default-record/out
new file mode 100644
index 0000000000..aeb44cf221
--- /dev/null
+++ b/testing/btest/Baseline/language.table-default-record/out
@@ -0,0 +1,7 @@
+0
+0
+0
+0
+{
+
+}
diff --git a/testing/btest/Baseline/scripts.base.frameworks.sumstats.cluster-intermediate-update/manager-1..stdout b/testing/btest/Baseline/scripts.base.frameworks.sumstats.cluster-intermediate-update/manager-1..stdout
index 2a53389dc3..a5428dd3b7 100644
--- a/testing/btest/Baseline/scripts.base.frameworks.sumstats.cluster-intermediate-update/manager-1..stdout
+++ b/testing/btest/Baseline/scripts.base.frameworks.sumstats.cluster-intermediate-update/manager-1..stdout
@@ -1 +1,3 @@
-A test metric threshold was crossed with a value of: 100.0
+A test metric threshold was crossed with a value of: 101.0
+End of epoch handler was called
+101.0
diff --git a/testing/btest/Baseline/scripts.base.protocols.socks.trace1/socks.log b/testing/btest/Baseline/scripts.base.protocols.socks.trace1/socks.log
index b2a8ef7d4c..8529e18186 100644
--- a/testing/btest/Baseline/scripts.base.protocols.socks.trace1/socks.log
+++ b/testing/btest/Baseline/scripts.base.protocols.socks.trace1/socks.log
@@ -3,8 +3,8 @@
 #empty_field	(empty)
 #unset_field	-
 #path	socks
-#open	2012-06-20-17-23-38
+#open	2013-05-02-01-02-50
 #fields	ts	uid	id.orig_h	id.orig_p	id.resp_h	id.resp_p	version	user	status	request.host	request.name	request_p	bound.host	bound.name	bound_p
 #types	time	string	addr	port	addr	port	count	string	string	addr	string	port	addr	string	port
-1340213015.276495	UWkUyAuUGXf	10.0.0.55	53994	60.190.189.214	8124	5	-	succeeded	-	www.osnews.com	80	192.168.0.31	-	2688
-#close	2012-06-20-17-28-10
+1340213015.276495	arKYeMETxOg	10.0.0.55	53994	60.190.189.214	8124	5	-	succeeded	-	www.osnews.com	80	192.168.0.31	-	2688
+#close	2013-05-02-01-02-50
diff --git a/testing/btest/Baseline/scripts.policy.frameworks.software.vulnerable/notice.log b/testing/btest/Baseline/scripts.policy.frameworks.software.vulnerable/notice.log
index 21b5342a13..f2cf09cab6 100644
--- a/testing/btest/Baseline/scripts.policy.frameworks.software.vulnerable/notice.log
+++ b/testing/btest/Baseline/scripts.policy.frameworks.software.vulnerable/notice.log
@@ -3,9 +3,9 @@
 #empty_field	(empty)
 #unset_field	-
 #path	notice
-#open	2013-04-25-18-55-26
-#fields	ts	uid	id.orig_h	id.orig_p	id.resp_h	id.resp_p	proto	note	msg	sub	src	dst	p	n	peer_descr	actions	suppress_for	dropped	remote_location.country_code	remote_location.region	remote_location.city	remote_location.latitude	remote_location.longitude	metric_index.host	metric_index.str	metric_index.network
-#types	time	string	addr	port	addr	port	enum	enum	string	string	addr	addr	port	count	string	table[enum]	interval	bool	string	string	string	double	double	addr	string	subnet
-1366916126.685057	-	-	-	-	-	-	Software::Vulnerable_Version	1.2.3.4 is running Java 1.7.0.15 which is vulnerable.	Java 1.7.0.15	1.2.3.4	-	-	-	bro	Notice::ACTION_LOG	3600.000000	F	-	-	-	-	-	-	-	-
-1366916126.685057	-	-	-	-	-	-	Software::Vulnerable_Version	1.2.3.5 is running Java 1.6.0.43 which is vulnerable.	Java 1.6.0.43	1.2.3.5	-	-	-	bro	Notice::ACTION_LOG	3600.000000	F	-	-	-	-	-	-	-	-
-#close	2013-04-25-18-55-26
+#open	2013-04-28-22-36-26
+#fields	ts	uid	id.orig_h	id.orig_p	id.resp_h	id.resp_p	proto	note	msg	sub	src	dst	p	n	peer_descr	actions	suppress_for	dropped	remote_location.country_code	remote_location.region	remote_location.city	remote_location.latitude	remote_location.longitude
+#types	time	string	addr	port	addr	port	enum	enum	string	string	addr	addr	port	count	string	table[enum]	interval	bool	string	string	string	double	double
+1367188586.649122	-	-	-	-	-	-	Software::Vulnerable_Version	1.2.3.4 is running Java 1.7.0.15 which is vulnerable.	Java 1.7.0.15	1.2.3.4	-	-	-	bro	Notice::ACTION_LOG	3600.000000	F	-	-	-	-	-
+1367188586.649122	-	-	-	-	-	-	Software::Vulnerable_Version	1.2.3.5 is running Java 1.6.0.43 which is vulnerable.	Java 1.6.0.43	1.2.3.5	-	-	-	bro	Notice::ACTION_LOG	3600.000000	F	-	-	-	-	-
+#close	2013-04-28-22-36-26
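Related to the TCP.cc change above: the attempt timer is now installed whenever tcp_attempt_delay is non-zero, independent of whether a connection_attempt() handler exists, so per-site tuning stays a one-line redef. The value below is arbitrary, for illustration only:

    # How long an unanswered SYN may sit before the attempt times out.
    redef tcp_attempt_delay = 10 secs;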
diff --git a/testing/btest/core/leaks/basic-cluster.bro b/testing/btest/core/leaks/basic-cluster.bro
index 319368bc6e..d25af55b3f 100644
--- a/testing/btest/core/leaks/basic-cluster.bro
+++ b/testing/btest/core/leaks/basic-cluster.bro
@@ -6,33 +6,38 @@
 # @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks
 #
 # @TEST-EXEC: btest-bg-run manager-1 HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local BROPATH=$BROPATH:.. CLUSTER_NODE=manager-1 bro -m %INPUT
-# @TEST-EXEC: btest-bg-run proxy-1 HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local BROPATH=$BROPATH:.. CLUSTER_NODE=proxy-1 bro -m %INPUT
 # @TEST-EXEC: sleep 1
-# @TEST-EXEC: btest-bg-run worker-1 HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local BROPATH=$BROPATH:.. CLUSTER_NODE=worker-1 bro -m -r $TRACES/web.trace --pseudo-realtime %INPUT
-# @TEST-EXEC: btest-bg-run worker-2 HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local BROPATH=$BROPATH:.. CLUSTER_NODE=worker-2 bro -m -r $TRACES/web.trace --pseudo-realtime %INPUT
-# @TEST-EXEC: btest-bg-wait 60
-# @TEST-EXEC: btest-diff manager-1/metrics.log
+# @TEST-EXEC: btest-bg-run worker-1 HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local BROPATH=$BROPATH:.. CLUSTER_NODE=worker-1 bro -m %INPUT
+# @TEST-EXEC: btest-bg-run worker-2 HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local BROPATH=$BROPATH:.. CLUSTER_NODE=worker-2 bro -m %INPUT
+# @TEST-EXEC: btest-bg-wait 15

 @TEST-START-FILE cluster-layout.bro
 redef Cluster::nodes = {
 	["manager-1"] = [$node_type=Cluster::MANAGER, $ip=127.0.0.1, $p=37757/tcp, $workers=set("worker-1", "worker-2")],
-	["proxy-1"] = [$node_type=Cluster::PROXY, $ip=127.0.0.1, $p=37758/tcp, $manager="manager-1", $workers=set("worker-1", "worker-2")],
-	["worker-1"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=37760/tcp, $manager="manager-1", $proxy="proxy-1", $interface="eth0"],
-	["worker-2"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=37761/tcp, $manager="manager-1", $proxy="proxy-1", $interface="eth1"],
+	["worker-1"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=37760/tcp, $manager="manager-1", $interface="eth0"],
+	["worker-2"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=37761/tcp, $manager="manager-1", $interface="eth1"],
 };
 @TEST-END-FILE

 redef Log::default_rotation_interval = 0secs;

-redef enum Metrics::ID += {
-	TEST_METRIC,
-};
+global n = 0;

 event bro_init() &priority=5
 	{
-	Metrics::add_filter(TEST_METRIC,
-	                    [$name="foo-bar",
-	                     $break_interval=3secs]);
+	local r1: SumStats::Reducer = [$stream="test", $apply=set(SumStats::SUM, SumStats::MIN, SumStats::MAX, SumStats::AVERAGE, SumStats::STD_DEV, SumStats::VARIANCE, SumStats::UNIQUE)];
+	SumStats::create([$epoch=5secs,
+	                  $reducers=set(r1),
+	                  $epoch_finished(rt: SumStats::ResultTable) =
+	                  	{
+	                  	for ( key in rt )
+	                  		{
+	                  		local r = rt[key]["test"];
+	                  		print fmt("Host: %s - num:%d - sum:%.1f - avg:%.1f - max:%.1f - min:%.1f - var:%.1f - std_dev:%.1f - unique:%d", key$host, r$num, r$sum, r$average, r$max, r$min, r$variance, r$std_dev, r$unique);
+	                  		}
+
+	                  	terminate();
+	                  	}]);
 	}

 event remote_connection_closed(p: event_peer)
@@ -41,43 +46,40 @@ event remote_connection_closed(p: event_peer)
 	}

 global ready_for_data: event();
-
-redef Cluster::manager2worker_events += /ready_for_data/;
-
-@if ( Cluster::local_node_type() == Cluster::WORKER )
+redef Cluster::manager2worker_events += /^ready_for_data$/;

 event ready_for_data()
 	{
-	Metrics::add_data(TEST_METRIC, [$host=1.2.3.4], 3);
-	Metrics::add_data(TEST_METRIC, [$host=6.5.4.3], 2);
-	Metrics::add_data(TEST_METRIC, [$host=7.2.1.5], 1);
+	if ( Cluster::node == "worker-1" )
+		{
+		SumStats::observe("test", [$host=1.2.3.4], [$num=34]);
+		SumStats::observe("test", [$host=1.2.3.4], [$num=30]);
+		SumStats::observe("test", [$host=6.5.4.3], [$num=1]);
+		SumStats::observe("test", [$host=7.2.1.5], [$num=54]);
+		}
+	if ( Cluster::node == "worker-2" )
+		{
+		SumStats::observe("test", [$host=1.2.3.4], [$num=75]);
+		SumStats::observe("test", [$host=1.2.3.4], [$num=30]);
+		SumStats::observe("test", [$host=1.2.3.4], [$num=3]);
+		SumStats::observe("test", [$host=1.2.3.4], [$num=57]);
+		SumStats::observe("test", [$host=1.2.3.4], [$num=52]);
+		SumStats::observe("test", [$host=1.2.3.4], [$num=61]);
+		SumStats::observe("test", [$host=1.2.3.4], [$num=95]);
+		SumStats::observe("test", [$host=6.5.4.3], [$num=5]);
+		SumStats::observe("test", [$host=7.2.1.5], [$num=91]);
+		SumStats::observe("test", [$host=10.10.10.10], [$num=5]);
+		}
 	}

-@endif
-
 @if ( Cluster::local_node_type() == Cluster::MANAGER )

-global n = 0;
 global peer_count = 0;
-
-event Metrics::log_metrics(rec: Metrics::Info)
+event remote_connection_handshake_done(p: event_peer) &priority=-5
 	{
-	n = n + 1;
-	if ( n == 3 )
-		{
-		terminate_communication();
-		terminate();
-		}
-	}
-
-event remote_connection_handshake_done(p: event_peer)
-	{
-	print p;
-	peer_count = peer_count + 1;
-	if ( peer_count == 3 )
-		{
+	++peer_count;
+	if ( peer_count == 2 )
 		event ready_for_data();
-		}
 	}

 @endif
diff --git a/testing/btest/coverage/bare-mode-errors.test b/testing/btest/coverage/bare-mode-errors.test
index 894c9e67f4..da968d5601 100644
--- a/testing/btest/coverage/bare-mode-errors.test
+++ b/testing/btest/coverage/bare-mode-errors.test
@@ -3,12 +3,13 @@
 # scripts that block after loading, e.g. start listening on a socket.
 #
 # Commonly, this test may fail if one forgets to @load some base/ scripts
-# when writing a new bro scripts.
+# when writing new bro scripts. Look into "allerrors" to find out
+# which script had trouble.
 #
 # @TEST-SERIALIZE: comm
 #
 # @TEST-EXEC: test -d $DIST/scripts
-# @TEST-EXEC: for script in `find $DIST/scripts/ -name \*\.bro -not -path '*/site/*'`; do echo $script; if echo "$script" | egrep -q 'communication/listen|controllee'; then rm -rf load_attempt .bgprocs; btest-bg-run load_attempt bro -b $script; btest-bg-wait -k 2; cat load_attempt/.stderr >>allerrors; else bro -b $script 2>>allerrors; fi done || exit 0
-# @TEST-EXEC: cat allerrors | grep -v "received termination signal" | sort | uniq > unique_errors
+# @TEST-EXEC: for script in `find $DIST/scripts/ -name \*\.bro -not -path '*/site/*'`; do echo "=== $script" >>allerrors; if echo "$script" | egrep -q 'communication/listen|controllee'; then rm -rf load_attempt .bgprocs; btest-bg-run load_attempt bro -b $script; btest-bg-wait -k 2; cat load_attempt/.stderr >>allerrors; else bro -b $script 2>>allerrors; fi done || exit 0
+# @TEST-EXEC: cat allerrors | grep -v "received termination signal" | grep -v '===' | sort | uniq > unique_errors
 # @TEST-EXEC: if [ $(grep -c LibCURL_INCLUDE_DIR-NOTFOUND $BUILD/CMakeCache.txt) -ne 0 ]; then cp unique_errors unique_errors_no_elasticsearch; fi
 # @TEST-EXEC: if [ $(grep -c LibCURL_INCLUDE_DIR-NOTFOUND $BUILD/CMakeCache.txt) -ne 0 ]; then btest-diff unique_errors_no_elasticsearch; else btest-diff unique_errors; fi
diff --git a/testing/btest/language/table-default-record.bro b/testing/btest/language/table-default-record.bro
new file mode 100644
index 0000000000..3894f3ac09
--- /dev/null
+++ b/testing/btest/language/table-default-record.bro
@@ -0,0 +1,24 @@
+# @TEST-EXEC: bro -b %INPUT >out
+# @TEST-EXEC: btest-diff out
+
+type Foo: record {
+	x: count &default=0;
+};
+
+global foo: table[count] of Foo = {} &default=[];
+
+# returns the &default value as usual
+print(foo[0]$x);
+print(foo[1]$x);
+
+# these are essentially no-ops since a copy of the &default value is returned
+# by the lookup
+foo[0]$x = 0;
+foo[1]$x = 1;
+
+# the &default value isn't modified
+print(foo[0]$x);
+print(foo[1]$x);
+
+# table membership isn't modified
+print(foo);
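Since lookups of a non-const &default now hand back a clone, a change has to be written back explicitly to persist. A small sketch building on the test above (the key and values are hypothetical):

    type Foo: record {
        x: count &default=0;
    };

    global foo: table[count] of Foo = {} &default=[];

    event bro_init()
        {
        foo[7]$x = 42;      # mutates only the returned copy
        print foo[7]$x;     # prints 0

        local rec = foo[7]; # take the copy ...
        rec$x = 42;         # ... modify it ...
        foo[7] = rec;       # ... and store it as a real table entry.
        print foo[7]$x;     # prints 42
        }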
diff --git a/testing/btest/scripts/base/frameworks/sumstats/cluster-intermediate-update.bro b/testing/btest/scripts/base/frameworks/sumstats/cluster-intermediate-update.bro
index 303a0dc852..bed1793721 100644
--- a/testing/btest/scripts/base/frameworks/sumstats/cluster-intermediate-update.bro
+++ b/testing/btest/scripts/base/frameworks/sumstats/cluster-intermediate-update.bro
@@ -4,7 +4,7 @@
 # @TEST-EXEC: sleep 3
 # @TEST-EXEC: btest-bg-run worker-1 BROPATH=$BROPATH:.. CLUSTER_NODE=worker-1 bro %INPUT
 # @TEST-EXEC: btest-bg-run worker-2 BROPATH=$BROPATH:.. CLUSTER_NODE=worker-2 bro %INPUT
-# @TEST-EXEC: btest-bg-wait 10
+# @TEST-EXEC: btest-bg-wait 20
 # @TEST-EXEC: btest-diff manager-1/.stdout

 @TEST-START-FILE cluster-layout.bro
@@ -20,8 +20,15 @@ redef Log::default_rotation_interval = 0secs;
 event bro_init() &priority=5
 	{
 	local r1: SumStats::Reducer = [$stream="test.metric", $apply=set(SumStats::SUM)];
-	SumStats::create([$epoch=1hr,
+	SumStats::create([$epoch=10secs,
 	                  $reducers=set(r1),
+	                  $epoch_finished(data: SumStats::ResultTable) =
+	                  	{
+	                  	print "End of epoch handler was called";
+	                  	for ( res in data )
+	                  		print data[res]["test.metric"]$sum;
+	                  	terminate();
+	                  	},
 	                  $threshold_val(key: SumStats::Key, result: SumStats::Result) =
 	                  	{
 	                  	return double_to_count(result["test.metric"]$sum);
@@ -30,7 +37,6 @@ event bro_init() &priority=5
 	                  $threshold_crossed(key: SumStats::Key, result: SumStats::Result) =
 	                  	{
 	                  	print fmt("A test metric threshold was crossed with a value of: %.1f", result["test.metric"]$sum);
-	                  	terminate();
 	                  	}]);
 	}

@@ -52,8 +58,13 @@ event remote_connection_handshake_done(p: event_peer)
 	if ( p$descr == "manager-1" )
 		{
 		if ( Cluster::node == "worker-1" )
+			{
 			schedule 0.1sec { do_stats(1) };
+			schedule 5secs { do_stats(60) };
+			}
 		if ( Cluster::node == "worker-2" )
-			schedule 0.5sec { do_stats(99) };
+			schedule 0.5sec { do_stats(40) };
 		}
 	}
+
+