diff --git a/CHANGES b/CHANGES
index a8e742f1bf..aa795ed8e4 100644
--- a/CHANGES
+++ b/CHANGES
@@ -1,4 +1,39 @@
+1.6-dev-1120 | 2011-08-19 19:00:15 -0700
+
+  * Fix for the CompHash fix. (Robin Sommer)
+
+1.6-dev-1118 | 2011-08-18 14:11:55 -0700
+
+  * Fixing key size calculation in composite hash code. (Robin Sommer)
+
+1.6-dev-1116 | 2011-08-18 10:05:07 -0700
+
+  * Remove the 'net' type from Bro (addresses #535).
+
+  * Fix H3 assumption of an 8-bit byte/char. (Jon Siwek)
+
+  * Allow reading from interface without additional script arguments.
+    Explicitly passing in '-' as an additional command line argument
+    still allows reading a script from stdin. (Jon Siwek)
+
+  * SSH bruteforcing detection now done with metrics framework. (Seth
+    Hall)
+
+  * Updates for SQL injection attack detection to match the metrics
+    framework updates. (Seth Hall)
+
+  * Metrics framework now works on cluster setups. (Seth Hall)
+
+  * Reclassifying more DNS manager errors as non-fatal errors. (Robin
+    Sommer)
+
+  * Fix ConnSize_Analyzer when used in conjunction with connection
+    compressor. (Gregor Maier)
+
+  * Fix reporter using part of the actual message as a format string.
+    (Jon Siwek)
+
 1.6-dev-1095 | 2011-08-13 11:59:07 -0700
 
  * A larger number of script documentation updates. Closes #543. (Jon
diff --git a/VERSION b/VERSION
index c8e03864f4..89d00dee1a 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-1.6-dev-1095
+1.6-dev-1120
diff --git a/aux/broccoli b/aux/broccoli
index 03e6d398ed..743f10dda8 160000
--- a/aux/broccoli
+++ b/aux/broccoli
@@ -1 +1 @@
-Subproject commit 03e6d398edf422140ba9f50e6fabbec33ee2f3cb
+Subproject commit 743f10dda8cd5655ea3dc6eb705ff5414ed4f535
diff --git a/aux/broctl b/aux/broctl
index c39622855e..cf4ce9dfc5 160000
--- a/aux/broctl
+++ b/aux/broctl
@@ -1 +1 @@
-Subproject commit c39622855e3c3a5cc94c7376f86184ed1db1939a
+Subproject commit cf4ce9dfc5d6dc4e6d311955eeaec2d679e8669b
diff --git a/scripts/base/frameworks/metrics/cluster.bro b/scripts/base/frameworks/metrics/cluster.bro
index 94281eb883..91efa98996 100644
--- a/scripts/base/frameworks/metrics/cluster.bro
+++ b/scripts/base/frameworks/metrics/cluster.bro
@@ -3,39 +3,66 @@
 ##! and will depend on whether the cluster framework has been enabled.
 ##! The goal of this script is to make metric calculation completely and
 ##! transparently automated when running on a cluster.
+##!
+##! The events defined here are deliberately not exported because they are
+##! meant to be an internal implementation detail.
 
 @load base/frameworks/cluster
 
 module Metrics;
 
 export {
-	## This event is sent by the manager in a cluster to initiate the
-	## collection of metrics values.
-	global cluster_collect: event(uid: string, id: ID, filter_name: string);
-
-	## This event is sent by nodes that are collecting metrics after receiving
-	## a request for the metric filter from the manager.
-	global cluster_results: event(uid: string, id: ID, filter_name: string, data: MetricTable, done: bool);
-
-	## This event is used internally by workers to send result chunks.
-	global send_data: event(uid: string, id: ID, filter_name: string, data: MetricTable);
-
	## The number of metric values that workers group together into a
	## single result chunk when transmitting to the manager.
 	const cluster_send_in_groups_of = 50 &redef;
+
+	## This is the percentage of the full threshold value that needs to be
+	## met on a single worker for that worker to send the value to its
+	## manager in order for it to request a global view for that value.
+	## There is no requirement that the manager requests a global view for
+	## the index, since it may opt not to if it requested a global view for
+	## the index recently.
+	const cluster_request_global_view_percent = 0.1 &redef;
 }
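[Editor's note: both exported tunables above can be adjusted from local site
policy. A minimal sketch, assuming a cluster deployment with the metrics
framework loaded; the values are illustrative, not recommendations.]

    @load base/frameworks/metrics

    # Send worker results to the manager in larger chunks.
    redef Metrics::cluster_send_in_groups_of = 100;

    # Require 20% of the full threshold on a single worker before it
    # volunteers an intermediate update to its manager.
    redef Metrics::cluster_request_global_view_percent = 0.2;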
+## This event is sent by the manager in a cluster to initiate the
+## collection of metrics values for a filter.
+global cluster_filter_request: event(uid: string, id: ID, filter_name: string);
+
+## This event is sent by nodes that are collecting metrics after receiving
+## a request for the metric filter from the manager.
+global cluster_filter_response: event(uid: string, id: ID, filter_name: string, data: MetricTable, done: bool);
+
+## This event is sent by the manager in a cluster to initiate the
+## collection of a single index value from a filter. It's typically
+## used to get intermediate updates before the break interval triggers
+## to speed detection of a value crossing a threshold.
+global cluster_index_request: event(uid: string, id: ID, filter_name: string, index: Index);
+
+## This event is sent by nodes in response to a
+## :bro:id:`cluster_index_request` event.
+global cluster_index_response: event(uid: string, id: ID, filter_name: string, index: Index, val: count);
+
+## This is sent by workers once a value crosses the percentage of the
+## current threshold defined globally in
+## :bro:id:`cluster_request_global_view_percent`.
+global cluster_index_intermediate_response: event(id: Metrics::ID, filter_name: string, index: Metrics::Index, val: count);
+
+## This event is scheduled internally on workers to send result chunks.
+global send_data: event(uid: string, id: ID, filter_name: string, data: MetricTable);
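[Editor's note: a worked example of the intermediate-update arithmetic used
by the non-manager data_added() below, assuming a cluster deployment and the
default cluster_request_global_view_percent of 0.1. A worker that has seen a
local value of 10 scales it to 10 / 0.1 = 100; with a filter whose
$notice_threshold is 100, that crosses the full threshold, so the worker
volunteers the index to the manager.]

    @load base/frameworks/metrics

    event bro_init()
    	{
    	local val = 10;
    	local pct_val = double_to_count(val / Metrics::cluster_request_global_view_percent);
    	print fmt("scaled value: %d", pct_val); # prints 100
    	}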
 
 # This is maintained by managers so they can know what data they requested and
 # when they requested it.
 global requested_results: table[string] of time = table() &create_expire=5mins;
 
-# TODO: Both of the next variables make the assumption that a value never
+# TODO: The next 4 variables make the assumption that a value never
 # takes longer than 5 minutes to transmit from workers to manager. This needs to
 # be tunable or self-tuning. These should also be restructured to be
 # maintained within a single variable.
+
 # This variable is maintained by manager nodes as they collect and aggregate
 # results.
-global collecting_results: table[string, ID, string] of MetricTable &create_expire=5mins;
+global filter_results: table[string, ID, string] of MetricTable &create_expire=5mins;
 
 # This variable is maintained by manager nodes to track how many "dones" they
 # collected per collection unique id. Once the number of results for a uid
@@ -44,28 +71,47 @@ global collecting_results: table[string, ID, string] of MetricTable &create_expi
 # TODO: add an &expire_func in case not all results are received.
 global done_with: table[string] of count &create_expire=5mins &default=0;
 
+# This variable is maintained by managers to track intermediate responses as
+# they are getting a global view for a certain index.
+global index_requests: table[string, ID, string, Index] of count &create_expire=5mins &default=0;
+
+# This variable is maintained by all hosts for different purposes. Non-managers
+# maintain it to know what indexes they have recently sent as intermediate
+# updates so they don't overwhelm their manager. Managers maintain it so they
+# don't overwhelm workers with intermediate index requests. The count that is
+# yielded is the number of times the percentage threshold has been crossed and
+# an intermediate result has been received. The manager may optionally request
+# the index again before data expires from here if too many workers are crossing
+# the percentage threshold (not implemented yet!).
+global recent_global_view_indexes: table[ID, string, Index] of count &create_expire=5mins &default=0;
+
 # Add events to the cluster framework to make this work.
-redef Cluster::manager_events += /Metrics::cluster_collect/;
-redef Cluster::worker_events += /Metrics::cluster_results/;
+redef Cluster::manager_events += /Metrics::cluster_(filter_request|index_request)/;
+redef Cluster::worker_events += /Metrics::cluster_(filter_response|index_response|index_intermediate_response)/;
 
-# The metrics collection process can only be done by a manager.
-@if ( Cluster::local_node_type() == Cluster::MANAGER )
-event Metrics::log_it(filter: Filter)
+@if ( Cluster::local_node_type() != Cluster::MANAGER )
+# This is done on all non-manager node types in the event that a metric is
+# being collected somewhere other than a worker.
+function data_added(filter: Filter, index: Index, val: count)
 	{
-	local uid = unique_id("");
+	# If an intermediate update for this value was sent recently, don't send
+	# it again.
+	if ( [filter$id, filter$name, index] in recent_global_view_indexes )
+		return;
+
+	# If val is 5 and the global view percentage is 0.1 (10%), pct_val will
+	# be 50. If that crosses the full threshold, then it's a candidate to
+	# send as an intermediate update.
+	local pct_val = double_to_count(val / cluster_request_global_view_percent);
 
-	# Set some tracking variables.
-	requested_results[uid] = network_time();
-	collecting_results[uid, filter$id, filter$name] = table();
-
-	# Request data from peers.
-	event Metrics::cluster_collect(uid, filter$id, filter$name);
-	# Schedule the log_it event for the next break period.
-	schedule filter$break_interval { Metrics::log_it(filter) };
+	if ( check_notice(filter, index, pct_val) )
+		{
+		# Kick off the intermediate update.
+		event Metrics::cluster_index_intermediate_response(filter$id, filter$name, index, val);
+
+		++recent_global_view_indexes[filter$id, filter$name, index];
+		}
 	}
-@endif
-
-@if ( Cluster::local_node_type() == Cluster::WORKER )
 
 event Metrics::send_data(uid: string, id: ID, filter_name: string, data: MetricTable)
 	{
@@ -89,31 +135,99 @@ event Metrics::send_data(uid: string, id: ID, filter_name: string, data: MetricT
 	if ( |data| == 0 )
 		done = T;
 
-	event Metrics::cluster_results(uid, id, filter_name, local_data, done);
+	event Metrics::cluster_filter_response(uid, id, filter_name, local_data, done);
 	if ( ! done )
 		event Metrics::send_data(uid, id, filter_name, data);
 	}
 
-event Metrics::cluster_collect(uid: string, id: ID, filter_name: string)
+event Metrics::cluster_filter_request(uid: string, id: ID, filter_name: string)
 	{
-	#print fmt("WORKER %s: received the cluster_collect event.", Cluster::node);
+	#print fmt("WORKER %s: received the cluster_filter_request event.", Cluster::node);
 
+	# Initiate sending all of the data for the requested filter.
 	event Metrics::send_data(uid, id, filter_name, store[id, filter_name]);
-	
+
 	# Lookup the actual filter and reset it; the reference to the data
 	# currently stored will be maintained internally by the send_data event.
 	reset(filter_store[id, filter_name]);
 	}
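[Editor's note: send_data above drains a worker's MetricTable in fixed-size
chunks by re-raising itself on the remainder. A standalone toy sketch of that
pattern; the table type, event name, and group size of 2 are illustrative,
while the framework uses cluster_send_in_groups_of.]

    global drain: event(data: table[count] of string);

    event drain(data: table[count] of string)
    	{
    	local num_added = 0;
    	for ( i in data )
    		{
    		print data[i];
    		delete data[i];

    		++num_added;
    		if ( num_added == 2 ) # toy stand-in for cluster_send_in_groups_of
    			break;
    		}

    	# Re-raise with whatever is left over.
    	if ( |data| > 0 )
    		event drain(data);
    	}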
+
+event Metrics::cluster_index_request(uid: string, id: ID, filter_name: string, index: Index)
+	{
+	local val = 0;
+	if ( index in store[id, filter_name] )
+		val = store[id, filter_name][index];
+
+	# fmt("WORKER %s: received the cluster_index_request event for %s=%d.", Cluster::node, index2str(index), val);
+	event Metrics::cluster_index_response(uid, id, filter_name, index, val);
+	}
+
 @endif
 
 @if ( Cluster::local_node_type() == Cluster::MANAGER )
 
-event Metrics::cluster_results(uid: string, id: ID, filter_name: string, data: MetricTable, done: bool)
+# Managers handle logging.
+event Metrics::log_it(filter: Filter)
+	{
+	#print fmt("%.6f MANAGER: breaking %s filter for %s metric", network_time(), filter$name, filter$id);
+
+	local uid = unique_id("");
+
+	# Set some tracking variables.
+	requested_results[uid] = network_time();
+	filter_results[uid, filter$id, filter$name] = table();
+
+	# Request data from peers.
+	event Metrics::cluster_filter_request(uid, filter$id, filter$name);
+	# Schedule the log_it event for the next break period.
+	schedule filter$break_interval { Metrics::log_it(filter) };
+	}
+
+# This is unlikely to be called often, but it's here in case there are metrics
+# being collected by managers.
+function data_added(filter: Filter, index: Index, val: count)
+	{
+	if ( check_notice(filter, index, val) )
+		do_notice(filter, index, val);
+	}
+
+event Metrics::cluster_index_response(uid: string, id: ID, filter_name: string, index: Index, val: count)
+	{
+	#print fmt("%0.6f MANAGER: receiving index data from %s", network_time(), get_event_peer()$descr);
+
+	if ( [uid, id, filter_name, index] !in index_requests )
+		index_requests[uid, id, filter_name, index] = 0;
+
+	index_requests[uid, id, filter_name, index] += val;
+	local ir = index_requests[uid, id, filter_name, index];
+
+	++done_with[uid];
+	if ( Cluster::worker_count == done_with[uid] )
+		{
+		if ( check_notice(filter_store[id, filter_name], index, ir) )
+			do_notice(filter_store[id, filter_name], index, ir);
+		delete done_with[uid];
+		delete index_requests[uid, id, filter_name, index];
+		}
+	}
+
+# Managers handle intermediate updates here.
+event Metrics::cluster_index_intermediate_response(id: ID, filter_name: string, index: Index, val: count)
+	{
+	#print fmt("MANAGER: receiving intermediate index data from %s", get_event_peer()$descr);
+	#print fmt("MANAGER: requesting index data for %s", index2str(index));
+
+	local uid = unique_id("");
+	event Metrics::cluster_index_request(uid, id, filter_name, index);
+	++recent_global_view_indexes[id, filter_name, index];
+	}
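[Editor's note: once every worker has answered, i.e. done_with[uid] equals
Cluster::worker_count, the summed value in cluster_index_response above is
what gets checked against the threshold. A toy illustration of that
aggregation step with made-up per-worker values:]

    event bro_init()
    	{
    	# e.g. worker-1 crossed the percentage mark with 99, worker-2 adds 1.
    	local worker_vals = vector(99, 1);
    	local total = 0;
    	for ( i in worker_vals )
    		total += worker_vals[i];
    	print fmt("global view: %d", total); # 100 crosses a threshold of 100
    	}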
+
+event Metrics::cluster_filter_response(uid: string, id: ID, filter_name: string, data: MetricTable, done: bool)
 	{
 	#print fmt("MANAGER: receiving results from %s", get_event_peer()$descr);
 
-	local local_data = collecting_results[uid, id, filter_name];
+	local local_data = filter_results[uid, id, filter_name];
 	for ( index in data )
 		{
 		if ( index !in local_data )
@@ -131,15 +245,16 @@ event Metrics::cluster_results(uid: string, id: ID, filter_name: string, data: M
 		local ts = network_time();
 		# Log the time this was initially requested if it's available.
 		if ( uid in requested_results )
+			{
 			ts = requested_results[uid];
-
-		write_log(ts, filter_store[id, filter_name], local_data);
-		if ( [uid, id, filter_name] in collecting_results )
-			delete collecting_results[uid, id, filter_name];
-		if ( uid in done_with )
-			delete done_with[uid];
-		if ( uid in requested_results )
 			delete requested_results[uid];
+			}
+
+		write_log(ts, filter_store[id, filter_name], local_data);
+
+		# Clean up.
+		delete filter_results[uid, id, filter_name];
+		delete done_with[uid];
 		}
 	}
diff --git a/scripts/base/frameworks/metrics/main.bro b/scripts/base/frameworks/metrics/main.bro
index 38ed17b36f..2dd9e19b03 100644
--- a/scripts/base/frameworks/metrics/main.bro
+++ b/scripts/base/frameworks/metrics/main.bro
@@ -15,6 +15,10 @@ export {
 	## current value to the logging stream.
 	const default_break_interval = 15mins &redef;
 
+	## The interval to wait before a notice may be raised again for a
+	## given metric index after it has already fired.
+	const renotice_interval = 1hr &redef;
+
 	type Index: record {
 		## Host is the value to which this metric applies.
 		host: addr &optional;
@@ -56,32 +60,40 @@ export {
 		pred: function(index: Index): bool &optional;
 		## Global mask by which you'd like to aggregate traffic.
 		aggregation_mask: count &optional;
-		## This is essentially applying names to various subnets.
+		## This is essentially a mapping table from subnets to the
+		## subnets they should be aggregated into.
 		aggregation_table: table[subnet] of subnet &optional;
 		## The interval at which the metric should be "broken" and written
-		## to the logging stream.
+		## to the logging stream. The counters are also reset to zero at
+		## this time, so any threshold-based detection needs to use a
+		## threshold that can be expected to be crossed within this period.
 		break_interval: interval &default=default_break_interval;
 		## This determines if the result of this filter is sent to the metrics
 		## logging stream. One use for the logging framework is as an internal
 		## thresholding and statistics gathering utility that is meant to
 		## never log but rather to generate notices and derive data.
 		log: bool &default=T;
-		## A straight threshold for generating a notice.
-		notice_threshold: count &optional;
-		## A series of thresholds at which to generate notices.
-		## TODO: This is not implemented yet!
-		notice_thresholds: vector of count &optional;
 		## If this and a $notice_threshold value are set, this notice type
 		## will be generated by the metrics framework.
 		note: Notice::Type &optional;
+		## A straight threshold for generating a notice.
+		notice_threshold: count &optional;
+		## A series of thresholds at which to generate notices.
+		notice_thresholds: vector of count &optional;
+		## How often a notice should be raised for this metric index. It
+		## will be generated every time a threshold is crossed, but if
+		## $break_interval is set to 5mins and this is set to 1hr, the
+		## notice will only be generated once per hour even if something
+		## crosses the threshold in every break interval.
+		notice_freq: interval &optional;
 	};
 
 	global add_filter: function(id: ID, filter: Filter);
 	global add_data: function(id: ID, index: Index, increment: count);
+	global index2str: function(index: Index): string;
 
 	# This is the event that is used to "finish" metrics and adapt the metrics
 	# framework for clustered or non-clustered usage.
-	global log_it: event(filter: Filter);
+	global log_it: event(filter: Filter);
 
 	global log_metrics: event(rec: Info);
 }
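[Editor's note: a compact usage sketch assembled from the test cases and the
SSH bruteforcing rewrite later in this change; the metric, notice name, and
numbers are illustrative. It counts an activity per host and raises a notice
after 50 occurrences within a 5 minute window, at most once per hour.]

    @load base/frameworks/metrics

    redef enum Notice::Type += { Fetch_Threshold };
    redef enum Metrics::ID += { WIDGET_FETCHES };

    event bro_init()
    	{
    	Metrics::add_filter(WIDGET_FETCHES,
    	                    [$name="widget-fetches",
    	                     $log=F,
    	                     $note=Fetch_Threshold,
    	                     $notice_threshold=50,
    	                     $notice_freq=1hr,
    	                     $break_interval=5mins]);
    	}

    # At the observation point:
    # Metrics::add_data(WIDGET_FETCHES, [$host=1.2.3.4], 1);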
 
@@ -95,43 +107,48 @@ global filter_store: table[ID, string] of Filter = table();
 type MetricTable: table[Index] of count &default=0;
 
 # This is indexed by metric ID and stream filter name.
-global store: table[ID, string] of MetricTable = table();
+global store: table[ID, string] of MetricTable = table() &default=table();
+
+# This function checks if a threshold has been crossed and generates a
+# notice if it has. It is also used as a method to implement
+# mid-break-interval threshold crossing detection for cluster deployments.
+global check_notice: function(filter: Filter, index: Index, val: count): bool;
+
+# This is a hook for watching thresholds being crossed. It is called whenever
+# index values are updated and the new value is given as the `val` argument.
+global data_added: function(filter: Filter, index: Index, val: count);
 
 # This stores the current threshold index for filters using the
-# $notice_thresholds element.
-global thresholds: table[string] of count = {} &default=0;
+# $notice_threshold and $notice_thresholds elements.
+global thresholds: table[ID, string, Index] of count = {} &create_expire=renotice_interval &default=0;
 
 event bro_init() &priority=5
 	{
 	Log::create_stream(METRICS, [$columns=Info, $ev=log_metrics]);
 	}
 
+function index2str(index: Index): string
+	{
+	local out = "";
+	if ( index?$host )
+		out = fmt("%shost=%s", out, index$host);
+	if ( index?$network )
+		out = fmt("%s%snetwork=%s", out, |out|==0 ? "" : ", ", index$network);
+	if ( index?$str )
+		out = fmt("%s%sstr=%s", out, |out|==0 ? "" : ", ", index$str);
+	return fmt("metric_index(%s)", out);
+	}
+
 function write_log(ts: time, filter: Filter, data: MetricTable)
 	{
 	for ( index in data )
 		{
 		local val = data[index];
-		local m: Info = [$ts=ts,
+		local m: Info = [$ts=ts,
 		                 $metric_id=filter$id,
 		                 $filter_name=filter$name,
 		                 $index=index,
 		                 $value=val];
-
-		if ( m$index?$host &&
-		     filter?$notice_threshold &&
-		     m$value >= filter$notice_threshold )
-			{
-			NOTICE([$note=filter$note,
-			        $msg=fmt("Metrics threshold crossed by %s %d/%d", index$host, m$value, filter$notice_threshold),
-			        $src=m$index$host, $n=m$value,
-			        $metric_index=index]);
-			}
-
-		else if ( filter?$notice_thresholds &&
-		          m$value >= filter$notice_thresholds[thresholds[cat(filter$id,filter$name)]] )
-			{
-			# TODO: implement this
-			}
 
 		if ( filter$log )
 			Log::write(METRICS, m);
@@ -182,18 +199,16 @@ function add_data(id: ID, index: Index, increment: count)
 
 	local filters = metric_filters[id];
 
-	# Add the data to any of the defined filters.
+	# Try to add the data to all of the defined filters for the metric.
 	for ( filter_id in filters )
 		{
 		local filter = filters[filter_id];
 
 		# If this filter has a predicate, run the predicate and skip this
		# index if the predicate returns false.
-		if ( filter?$pred &&
-		     ! filter$pred(index) )
+		if ( filter?$pred && ! filter$pred(index) )
 			next;
 
-		local filt_store = store[id, filter$name];
 		if ( index?$host )
 			{
 			if ( filter?$aggregation_mask )
@@ -207,9 +222,49 @@ function add_data(id: ID, index: Index, increment: count)
 				delete index$host;
 				}
 			}
-
-		if ( index !in filt_store )
-			filt_store[index] = 0;
-		filt_store[index] += increment;
+
+		local metric_tbl = store[id, filter$name];
+		if ( index !in metric_tbl )
+			metric_tbl[index] = 0;
+		metric_tbl[index] += increment;
+
+		data_added(filter, index, metric_tbl[index]);
 		}
 	}
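[Editor's note: a quick illustration of the index2str() output format
introduced above; this string is what appears in notice messages, e.g. in the
updated notice.log baselines later in this change. The URI value is
hypothetical.]

    event bro_init()
    	{
    	print Metrics::index2str([$host=1.2.3.4]);
    	# metric_index(host=1.2.3.4)
    	print Metrics::index2str([$host=1.2.3.4, $str="/login.php"]);
    	# metric_index(host=1.2.3.4, str=/login.php)
    	}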
 
+function check_notice(filter: Filter, index: Index, val: count): bool
+	{
+	if ( (filter?$notice_threshold &&
+	      [filter$id, filter$name, index] !in thresholds &&
+	      val >= filter$notice_threshold) ||
+	     (filter?$notice_thresholds &&
+	      thresholds[filter$id, filter$name, index] < |filter$notice_thresholds| &&
+	      val >= filter$notice_thresholds[thresholds[filter$id, filter$name, index]]) )
+		return T;
+	else
+		return F;
+	}
+
+function do_notice(filter: Filter, index: Index, val: count)
+	{
+	# We include $peer_descr here because a manager could have actually
+	# generated the notice even though the current remote peer for the event
+	# calling this could be a worker if this is running as a cluster.
+	local n: Notice::Info = [$note=filter$note,
+	                         $n=val,
+	                         $metric_index=index,
+	                         $peer_descr=peer_description];
+	n$msg = fmt("Threshold crossed by %s %d/%d", index2str(index), val, filter$notice_threshold);
+	if ( index?$str )
+		n$sub = index$str;
+	if ( index?$host )
+		n$src = index$host;
+	# TODO: not sure where to put the network yet.
+
+	NOTICE(n);
+
+	# This just needs to be set to some value so that the notice doesn't
+	# fire again until it expires from the table or the next threshold is
+	# crossed in the case of vectors of thresholds.
+	++thresholds[filter$id, filter$name, index];
+	}
diff --git a/scripts/base/frameworks/metrics/non-cluster.bro b/scripts/base/frameworks/metrics/non-cluster.bro
index a96210649e..a467ebf714 100644
--- a/scripts/base/frameworks/metrics/non-cluster.bro
+++ b/scripts/base/frameworks/metrics/non-cluster.bro
@@ -1,10 +1,6 @@
 module Metrics;
 
-export {
-
-}
-
 event Metrics::log_it(filter: Filter)
 	{
 	local id = filter$id;
@@ -15,3 +11,10 @@ event Metrics::log_it(filter: Filter)
 
 	schedule filter$break_interval { Metrics::log_it(filter) };
 	}
+
+
+function data_added(filter: Filter, index: Index, val: count)
+	{
+	if ( check_notice(filter, index, val) )
+		do_notice(filter, index, val);
+	}
\ No newline at end of file
diff --git a/scripts/base/frameworks/notice/main.bro b/scripts/base/frameworks/notice/main.bro
index 595851b7c5..ea7a472031 100644
--- a/scripts/base/frameworks/notice/main.bro
+++ b/scripts/base/frameworks/notice/main.bro
@@ -308,7 +308,9 @@ function apply_policy(n: Notice::Info)
 	if ( ! n?$src_peer )
 		n$src_peer = get_event_peer();
-	n$peer_descr = n$src_peer?$descr ? n$src_peer$descr : fmt("%s", n$src_peer$host);
+	if ( ! n?$peer_descr )
+		n$peer_descr = n$src_peer?$descr ?
+			n$src_peer$descr : fmt("%s", n$src_peer$host);
 
 	if ( ! n?$actions )
 		n$actions = set();
@@ -340,7 +342,7 @@ function apply_policy(n: Notice::Info)
 
 # Create the ordered notice policy automatically which will be used at runtime
 # for prioritized matching of the notice policy.
-event bro_init() +event bro_init() &priority=10 { local tmp: table[count] of set[PolicyItem] = table(); for ( pi in policy ) diff --git a/scripts/policy/protocols/http/detect-sqli.bro b/scripts/policy/protocols/http/detect-sqli.bro index 45a2bdb205..e2933626f7 100644 --- a/scripts/policy/protocols/http/detect-sqli.bro +++ b/scripts/policy/protocols/http/detect-sqli.bro @@ -35,13 +35,13 @@ export { event bro_init() { - Metrics::add_filter(SQL_ATTACKS, [$log=T, - $break_interval=1mins, + Metrics::add_filter(SQL_ATTACKS, [$log=F, + $break_interval=5mins, $note=SQL_Injection_Attacker]); - Metrics::add_filter(SQL_ATTACKS_AGAINST, [$log=T, - $break_interval=1mins, + Metrics::add_filter(SQL_ATTACKS_AGAINST, [$log=F, + $break_interval=5mins, $note=SQL_Injection_Attack, - $notice_thresholds=vector(10,100)]); + $notice_threshold=50]); } event http_request(c: connection, method: string, original_URI: string, @@ -51,7 +51,7 @@ event http_request(c: connection, method: string, original_URI: string, { add c$http$tags[URI_SQLI]; - Metrics::add_data(SQL_ATTACKS, [$host=c$id$orig_h]); - Metrics::add_data(SQL_ATTACKS_AGAINST, [$host=c$id$resp_h]); + Metrics::add_data(SQL_ATTACKS, [$host=c$id$orig_h], 1); + Metrics::add_data(SQL_ATTACKS_AGAINST, [$host=c$id$resp_h], 1); } } \ No newline at end of file diff --git a/scripts/policy/protocols/ssh/detect-bruteforcing.bro b/scripts/policy/protocols/ssh/detect-bruteforcing.bro index 36e73bfa59..fb1c075d86 100644 --- a/scripts/policy/protocols/ssh/detect-bruteforcing.bro +++ b/scripts/policy/protocols/ssh/detect-bruteforcing.bro @@ -11,6 +11,12 @@ export { ## has now had a heuristically successful login attempt. Login_By_Password_Guesser, }; + + redef enum Metrics::ID += { + ## This metric is to measure failed logins with the hope of detecting + ## bruteforcing hosts. + FAILED_LOGIN, + }; ## The number of failed SSH connections before a host is designated as ## guessing passwords. @@ -26,54 +32,40 @@ export { ## client subnets and the yield value represents server subnets. const ignore_guessers: table[subnet] of subnet &redef; - ## Keeps count of how many rejections a host has had. - global password_rejections: table[addr] of TrackCount - &write_expire=guessing_timeout - &synchronized; - ## Keeps track of hosts identified as guessing passwords. global password_guessers: set[addr] &read_expire=guessing_timeout+1hr &synchronized; } +event bro_init() + { + Metrics::add_filter(FAILED_LOGIN, [$name="detect-bruteforcing", $log=F, + $note=Password_Guessing, + $notice_threshold=password_guesses_limit, + $notice_freq=1hr, + $break_interval=guessing_timeout]); + } + event SSH::heuristic_successful_login(c: connection) { local id = c$id; - # TODO: this should be migrated to the metrics framework. - if ( id$orig_h in password_rejections && - password_rejections[id$orig_h]$n > password_guesses_limit && - id$orig_h !in password_guessers ) - { - add password_guessers[id$orig_h]; - NOTICE([$note=Login_By_Password_Guesser, - $conn=c, - $n=password_rejections[id$orig_h]$n, - $msg=fmt("Successful SSH login by password guesser %s", id$orig_h), - $sub=fmt("%d failed logins", password_rejections[id$orig_h]$n)]); - } + # TODO: This is out for the moment pending some more additions to the + # metrics framework. 
+ #if ( id$orig_h in password_guessers ) + # { + # NOTICE([$note=Login_By_Password_Guesser, + # $conn=c, + # $msg=fmt("Successful SSH login by password guesser %s", id$orig_h)]); + # } } event SSH::heuristic_failed_login(c: connection) { local id = c$id; - # presumed failure - if ( id$orig_h !in password_rejections ) - password_rejections[id$orig_h] = new_track_count(); - - # Track the number of rejections - # TODO: this should be migrated to the metrics framework. + # Add data to the FAILED_LOGIN metric unless this connection should + # be ignored. if ( ! (id$orig_h in ignore_guessers && id$resp_h in ignore_guessers[id$orig_h]) ) - ++password_rejections[id$orig_h]$n; - - if ( default_check_threshold(password_rejections[id$orig_h]) ) - { - add password_guessers[id$orig_h]; - NOTICE([$note=Password_Guessing, - $conn=c, - $msg=fmt("SSH password guessing by %s", id$orig_h), - $sub=fmt("%d apparently failed logins", password_rejections[id$orig_h]$n), - $n=password_rejections[id$orig_h]$n]); - } + Metrics::add_data(FAILED_LOGIN, [$host=id$orig_h], 1); } \ No newline at end of file diff --git a/scripts/policy/protocols/ssh/interesting-hostnames.bro b/scripts/policy/protocols/ssh/interesting-hostnames.bro index cf6ab7e40a..93767e5f54 100644 --- a/scripts/policy/protocols/ssh/interesting-hostnames.bro +++ b/scripts/policy/protocols/ssh/interesting-hostnames.bro @@ -36,7 +36,7 @@ event SSH::heuristic_successful_login(c: connection) } } # Check to see if this login went to an interesting hostname. - when ( local resp_hostname = lookup_addr(c$id$orig_h) ) + when ( local resp_hostname = lookup_addr(c$id$resp_h) ) { if ( interesting_hostnames in resp_hostname ) { diff --git a/src/CompHash.cc b/src/CompHash.cc index 605949b81c..763e5da463 100644 --- a/src/CompHash.cc +++ b/src/CompHash.cc @@ -47,7 +47,7 @@ CompositeHash::CompositeHash(TypeList* composite_type) else { - size = ComputeKeySize(); + size = ComputeKeySize(0, 1, true); if ( size > 0 ) // Fixed size. Make sure what we get is fully aligned. @@ -244,7 +244,7 @@ HashKey* CompositeHash::ComputeHash(const Val* v, int type_check) const if ( ! k ) { - int sz = ComputeKeySize(v, type_check); + int sz = ComputeKeySize(v, type_check, false); if ( sz == 0 ) return 0; @@ -331,7 +331,8 @@ HashKey* CompositeHash::ComputeSingletonHash(const Val* v, int type_check) const } int CompositeHash::SingleTypeKeySize(BroType* bt, const Val* v, - int type_check, int sz, bool optional) const + int type_check, int sz, bool optional, + bool calc_static_size) const { InternalTypeTag t = bt->InternalType(); @@ -393,7 +394,8 @@ int CompositeHash::SingleTypeKeySize(BroType* bt, const Val* v, sz = SingleTypeKeySize(rt->FieldType(i), rv ? rv->Lookup(i) : 0, - type_check, sz, optional); + type_check, sz, optional, + calc_static_size); if ( ! sz ) return 0; } @@ -408,7 +410,7 @@ int CompositeHash::SingleTypeKeySize(BroType* bt, const Val* v, case TYPE_INTERNAL_STRING: if ( ! v ) - return optional ? sz : 0; + return (optional && ! calc_static_size) ? sz : 0; // Factor in length field. 
sz = SizeAlign(sz, sizeof(int)); @@ -422,7 +424,7 @@ int CompositeHash::SingleTypeKeySize(BroType* bt, const Val* v, return sz; } -int CompositeHash::ComputeKeySize(const Val* v, int type_check) const +int CompositeHash::ComputeKeySize(const Val* v, int type_check, bool calc_static_size) const { const type_list* tl = type->Types(); const val_list* vl = 0; @@ -440,7 +442,7 @@ int CompositeHash::ComputeKeySize(const Val* v, int type_check) const loop_over_list(*tl, i) { sz = SingleTypeKeySize((*tl)[i], v ? v->AsListVal()->Index(i) : 0, - type_check, sz, false); + type_check, sz, false, calc_static_size); if ( ! sz ) return 0; } @@ -615,10 +617,6 @@ const char* CompositeHash::RecoverOneVal(const HashKey* k, const char* kp0, pval = new AddrVal(addr_val); break; - case TYPE_NET: - pval = new NetVal(addr_val); - break; - default: reporter->InternalError("bad internal address in CompositeHash::RecoverOneVal()"); pval = 0; diff --git a/src/CompHash.h b/src/CompHash.h index e2f87d240b..0e12cbf9a8 100644 --- a/src/CompHash.h +++ b/src/CompHash.h @@ -74,10 +74,12 @@ protected: // the value is computed for the particular list of values. // Returns 0 if the key has an indeterminant size (if v not given), // or if v doesn't match the index type (if given). - int ComputeKeySize(const Val* v = 0, int type_check = 1) const; + int ComputeKeySize(const Val* v, int type_check, + bool calc_static_size) const; int SingleTypeKeySize(BroType*, const Val*, - int type_check, int sz, bool optional) const; + int type_check, int sz, bool optional, + bool calc_static_size) const; TypeList* type; char* key; // space for composite key diff --git a/src/DNS_Mgr.cc b/src/DNS_Mgr.cc index e6bebda875..87d0db4dac 100644 --- a/src/DNS_Mgr.cc +++ b/src/DNS_Mgr.cc @@ -360,7 +360,7 @@ DNS_Mgr::DNS_Mgr(DNS_MgrMode arg_mode) nb_dns = nb_dns_init(err); if ( ! nb_dns ) - reporter->Warning(fmt("problem initializing NB-DNS: %s", err)); + reporter->Warning("problem initializing NB-DNS: %s", err); dns_mapping_valid = dns_mapping_unverified = dns_mapping_new_name = dns_mapping_lost_name = dns_mapping_name_changed = @@ -447,7 +447,7 @@ TableVal* DNS_Mgr::LookupHost(const char* name) return d->Addrs()->ConvertToSet(); else { - reporter->Warning("no such host:", name); + reporter->Warning("no such host: %s", name); return empty_addr_set(); } } @@ -460,7 +460,7 @@ TableVal* DNS_Mgr::LookupHost(const char* name) return empty_addr_set(); case DNS_FORCE: - reporter->InternalError("can't find DNS entry for %s in cache", name); + reporter->FatalError("can't find DNS entry for %s in cache", name); return 0; case DNS_DEFAULT: @@ -490,7 +490,7 @@ Val* DNS_Mgr::LookupAddr(uint32 addr) return d->Host(); else { - reporter->Warning("can't resolve IP address:", dotted_addr(addr)); + reporter->Warning("can't resolve IP address: %s", dotted_addr(addr)); return new StringVal(dotted_addr(addr)); } } @@ -503,7 +503,7 @@ Val* DNS_Mgr::LookupAddr(uint32 addr) return new StringVal(""); case DNS_FORCE: - reporter->InternalError("can't find DNS entry for %s in cache", + reporter->FatalError("can't find DNS entry for %s in cache", dotted_addr(addr)); return 0; @@ -574,7 +574,7 @@ void DNS_Mgr::Resolve() struct nb_dns_result r; status = nb_dns_activity(nb_dns, &r, err); if ( status < 0 ) - reporter->InternalError( + reporter->Warning( "NB-DNS error in DNS_Mgr::WaitForReplies (%s)", err); else if ( status > 0 ) @@ -823,7 +823,7 @@ void DNS_Mgr::LoadCache(FILE* f) } if ( ! 
m->NoMapping() )
-		reporter->InternalError("DNS cache corrupted");
+		reporter->FatalError("DNS cache corrupted");
 
 	delete m;
 	fclose(f);
@@ -958,7 +958,7 @@ void DNS_Mgr::IssueAsyncRequests()
 
 		if ( ! dr->MakeRequest(nb_dns) )
 			{
-			reporter->Error("can't issue DNS request");
+			reporter->Warning("can't issue DNS request");
 			req->Timeout();
 			continue;
 			}
@@ -1095,7 +1095,10 @@ int DNS_Mgr::AnswerAvailable(int timeout)
 	{
 	int fd = nb_dns_fd(nb_dns);
 	if ( fd < 0 )
-		reporter->InternalError("nb_dns_fd() failed in DNS_Mgr::WaitForReplies");
+		{
+		reporter->Warning("nb_dns_fd() failed in DNS_Mgr::WaitForReplies");
+		return -1;
+		}
 
 	fd_set read_fds;
 
@@ -1110,13 +1113,17 @@ int DNS_Mgr::AnswerAvailable(int timeout)
 
 	if ( status < 0 )
 		{
-		if ( errno == EINTR )
-			return -1;
-		reporter->InternalError("problem with DNS select");
+		if ( errno != EINTR )
+			reporter->Warning("problem with DNS select");
+
+		return -1;
 		}
 
 	if ( status > 1 )
-		reporter->InternalError("strange return from DNS select");
+		{
+		reporter->Warning("strange return from DNS select");
+		return -1;
+		}
 
 	return status;
 	}
diff --git a/src/DNS_Mgr.h b/src/DNS_Mgr.h
index 580eae92f1..151c05289f 100644
--- a/src/DNS_Mgr.h
+++ b/src/DNS_Mgr.h
@@ -100,8 +100,9 @@ protected:
 	void LoadCache(FILE* f);
 	void Save(FILE* f, PDict(DNS_Mapping)& m);
 
-	// Selects on the fd to see if there is an answer available (timeout is
-	// secs). Returns 0 on timeout, -1 on EINTR, and 1 if answer is ready.
+	// Selects on the fd to see if there is an answer available (timeout
+	// is secs). Returns 0 on timeout, -1 on EINTR or other error, and 1
+	// if answer is ready.
 	int AnswerAvailable(int timeout);
 
 	// Issue as many queued async requests as slots are available.
diff --git a/src/Expr.cc b/src/Expr.cc
index c4fbe5930a..dc47340ccd 100644
--- a/src/Expr.cc
+++ b/src/Expr.cc
@@ -2046,7 +2046,6 @@ EqExpr::EqExpr(BroExprTag arg_tag, Expr* arg_op1, Expr* arg_op2)
 	case TYPE_STRING:
 	case TYPE_PORT:
 	case TYPE_ADDR:
-	case TYPE_NET:
 	case TYPE_SUBNET:
 	case TYPE_ERROR:
 		break;
@@ -5022,13 +5021,11 @@ Val* ListExpr::InitVal(const BroType* t, Val* aggr) const
 		Expr* e = exprs[i];
 		check_and_promote_expr(e, vec->Type()->AsVectorType()->YieldType());
 		Val* v = e->Eval(0);
-		if ( ! vec->Assign(i, v->RefCnt() == 1 ? v->Ref() : v, e) )
+		if ( ! vec->Assign(i, v, e) )
 			{
 			e->Error(fmt("type mismatch at index %d", i));
 			return 0;
 			}
-
-		Unref(v);
 		}
 
 	return aggr;
diff --git a/src/H3.h b/src/H3.h
index 2a5368a93d..b4eab3a727 100644
--- a/src/H3.h
+++ b/src/H3.h
@@ -59,8 +59,13 @@
 #ifndef H3_H
 #define H3_H
 
+#include <limits.h>
+
+// The number of values representable by a byte.
+#define H3_BYTE_RANGE (UCHAR_MAX+1)
+
 template<class T, int N> class H3 {
-	T byte_lookup[N][256];
+	T byte_lookup[N][H3_BYTE_RANGE];
 public:
 	H3();
 	~H3() { free(byte_lookup); }
@@ -90,9 +95,9 @@ public:
 template<class T, int N>
 H3<T,N>::H3()
 {
-	T bit_lookup[N * 8];
+	T bit_lookup[N * CHAR_BIT];
 
-	for (size_t bit = 0; bit < N * 8; bit++) {
+	for (size_t bit = 0; bit < N * CHAR_BIT; bit++) {
 		bit_lookup[bit] = 0;
 		for (size_t i = 0; i < sizeof(T)/2; i++) {
 			// assume random() returns at least 16 random bits
@@ -101,12 +106,12 @@ H3<T,N>::H3()
 	}
 
 	for (size_t byte = 0; byte < N; byte++) {
-		for (unsigned val = 0; val < 256; val++) {
+		for (unsigned val = 0; val < H3_BYTE_RANGE; val++) {
 			byte_lookup[byte][val] = 0;
-			for (size_t bit = 0; bit < 8; bit++) {
+			for (size_t bit = 0; bit < CHAR_BIT; bit++) {
 				// Does this mean byte_lookup[*][0] == 0? -RP
 				if (val & (1 << bit))
-					byte_lookup[byte][val] ^= bit_lookup[byte*8+bit];
+					byte_lookup[byte][val] ^= bit_lookup[byte*CHAR_BIT+bit];
 			}
 		}
 	}
 }
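[Editor's note: the H3 change above replaces the hard-coded 8 and 256 with
CHAR_BIT and UCHAR_MAX+1 from limits.h. A tiny standalone C snippet, not part
of the patch, showing what those expressions evaluate to on a conventional
platform:]

    #include <limits.h>
    #include <stdio.h>

    int main(void)
    	{
    	/* On nearly all platforms this prints 8 and 256, but after this
    	   change H3 no longer assumes it. */
    	printf("CHAR_BIT      = %d\n", CHAR_BIT);
    	printf("H3_BYTE_RANGE = %d\n", UCHAR_MAX + 1);
    	return 0;
    	}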
diff --git a/src/Hash.cc b/src/Hash.cc
index ffabe36f83..1902af4f37 100644
--- a/src/Hash.cc
+++ b/src/Hash.cc
@@ -168,13 +168,12 @@ hash_t HashKey::HashBytes(const void* bytes, int size)
 	{
 	if ( size <= UHASH_KEY_SIZE )
 		{
-		const uint8* b = reinterpret_cast<const uint8*>(bytes);
 		// H3 doesn't check if size is zero
 		return ( size == 0 ) ? 0 : (*h3)(bytes, size);
 		}
 
 	// Fall back to HMAC/MD5 for longer data (which is usually rare).
 	hash_t digest[16];
-	hmac_md5(size, (unsigned char*) bytes, (unsigned char*) digest);
+	hmac_md5(size, (const unsigned char*) bytes, (unsigned char*) digest);
 	return digest[0];
 	}
diff --git a/src/LogMgr.cc b/src/LogMgr.cc
index 4719d04a22..950de24a8f 100644
--- a/src/LogMgr.cc
+++ b/src/LogMgr.cc
@@ -122,7 +122,6 @@ bool LogVal::IsCompatibleType(BroType* t, bool atomic_only)
 	case TYPE_COUNTER:
 	case TYPE_PORT:
 	case TYPE_SUBNET:
-	case TYPE_NET:
 	case TYPE_ADDR:
 	case TYPE_DOUBLE:
 	case TYPE_TIME:
@@ -205,7 +204,6 @@ bool LogVal::Read(SerializationFormat* fmt)
 		return true;
 		}
 
-	case TYPE_NET:
 	case TYPE_ADDR:
 		{
 		uint32 addr[4];
@@ -319,7 +317,6 @@ bool LogVal::Write(SerializationFormat* fmt) const
 			fmt->Write(val.subnet_val.width, "width");
 		}
 
-	case TYPE_NET:
 	case TYPE_ADDR:
 		{
 		uint32 addr[4];
@@ -1051,7 +1048,6 @@ LogVal* LogMgr::ValToLogVal(Val* val, BroType* ty)
 		lval->val.subnet_val = *val->AsSubNet();
 		break;
 
-	case TYPE_NET:
 	case TYPE_ADDR:
 		{
 		addr_type t = val->AsAddr();
diff --git a/src/LogWriterAscii.cc b/src/LogWriterAscii.cc
index 446d6c8d65..ad321dafaa 100644
--- a/src/LogWriterAscii.cc
+++ b/src/LogWriterAscii.cc
@@ -136,19 +136,18 @@ bool LogWriterAscii::DoWriteOne(ODesc* desc, LogVal* val, const LogField* field)
 		desc->Add(val->val.subnet_val.width);
 		break;
 
-	case TYPE_NET:
 	case TYPE_ADDR:
 		desc->Add(dotted_addr(val->val.addr_val));
 		break;
 
 	case TYPE_TIME:
+	case TYPE_INTERVAL:
 		char buf[32];
 		snprintf(buf, sizeof(buf), "%.6f", val->val.double_val);
 		desc->Add(buf);
 		break;
 
 	case TYPE_DOUBLE:
-	case TYPE_INTERVAL:
 		desc->Add(val->val.double_val);
 		break;
 
diff --git a/src/SerialTypes.h b/src/SerialTypes.h
index c2c4123a57..4d9b7a5880 100644
--- a/src/SerialTypes.h
+++ b/src/SerialTypes.h
@@ -91,16 +91,15 @@ SERIAL_VAL(VAL, 1)
 SERIAL_VAL(INTERVAL_VAL, 2)
 SERIAL_VAL(PORT_VAL, 3)
 SERIAL_VAL(ADDR_VAL, 4)
-SERIAL_VAL(NET_VAL, 5)
-SERIAL_VAL(SUBNET_VAL, 6)
-SERIAL_VAL(STRING_VAL, 7)
-SERIAL_VAL(PATTERN_VAL, 8)
-SERIAL_VAL(LIST_VAL, 9)
-SERIAL_VAL(TABLE_VAL, 10)
-SERIAL_VAL(RECORD_VAL, 11)
-SERIAL_VAL(ENUM_VAL, 12)
-SERIAL_VAL(VECTOR_VAL, 13)
-SERIAL_VAL(MUTABLE_VAL, 14)
+SERIAL_VAL(SUBNET_VAL, 5)
+SERIAL_VAL(STRING_VAL, 6)
+SERIAL_VAL(PATTERN_VAL, 7)
+SERIAL_VAL(LIST_VAL, 8)
+SERIAL_VAL(TABLE_VAL, 9)
+SERIAL_VAL(RECORD_VAL, 10)
+SERIAL_VAL(ENUM_VAL, 11)
+SERIAL_VAL(VECTOR_VAL, 12)
+SERIAL_VAL(MUTABLE_VAL, 13)
 
 #define SERIAL_EXPR(name, val) SERIAL_CONST(name, val, EXPR)
 SERIAL_EXPR(EXPR, 1)
diff --git a/src/Serializer.h b/src/Serializer.h
index 6bdac10f83..857abc0980 100644
--- a/src/Serializer.h
+++ b/src/Serializer.h
@@ -123,7 +123,7 @@ protected:
 
 	// This will be increased whenever there is an incompatible change
 	// in the data format.
- static const uint32 DATA_FORMAT_VERSION = 19; + static const uint32 DATA_FORMAT_VERSION = 20; ChunkedIO* io; diff --git a/src/Type.cc b/src/Type.cc index 521db20ebf..c2ab7e85df 100644 --- a/src/Type.cc +++ b/src/Type.cc @@ -28,7 +28,7 @@ const char* type_name(TypeTag t) "string", "pattern", "enum", "timer", - "port", "addr", "net", "subnet", + "port", "addr", "subnet", "any", "table", "union", "record", "types", "func", @@ -86,7 +86,6 @@ BroType::BroType(TypeTag t, bool arg_base_type) break; case TYPE_ADDR: - case TYPE_NET: internal_tag = TYPE_INTERNAL_ADDR; break; @@ -1687,7 +1686,6 @@ int same_type(const BroType* t1, const BroType* t2, int is_init) case TYPE_TIMER: case TYPE_PORT: case TYPE_ADDR: - case TYPE_NET: case TYPE_SUBNET: case TYPE_ANY: case TYPE_ERROR: @@ -1863,7 +1861,6 @@ int is_assignable(BroType* t) case TYPE_TIMER: case TYPE_PORT: case TYPE_ADDR: - case TYPE_NET: case TYPE_SUBNET: case TYPE_RECORD: case TYPE_FUNC: @@ -1941,7 +1938,6 @@ BroType* merge_types(const BroType* t1, const BroType* t2) case TYPE_TIMER: case TYPE_PORT: case TYPE_ADDR: - case TYPE_NET: case TYPE_SUBNET: case TYPE_BOOL: case TYPE_ANY: diff --git a/src/Type.h b/src/Type.h index 6ab8c65e03..1d7637828d 100644 --- a/src/Type.h +++ b/src/Type.h @@ -23,7 +23,7 @@ typedef enum { TYPE_STRING, TYPE_PATTERN, TYPE_ENUM, TYPE_TIMER, - TYPE_PORT, TYPE_ADDR, TYPE_NET, TYPE_SUBNET, + TYPE_PORT, TYPE_ADDR, TYPE_SUBNET, TYPE_ANY, TYPE_TABLE, TYPE_UNION, diff --git a/src/Val.cc b/src/Val.cc index 7db45cf648..2f4f6d2850 100644 --- a/src/Val.cc +++ b/src/Val.cc @@ -1099,84 +1099,6 @@ static uint32 parse_dotted(const char* text, int& dots) return a; } -NetVal::NetVal(const char* text) : AddrVal(TYPE_NET) - { - int dots; - uint32 a = parse_dotted(text, dots); - - if ( addr_to_net(a) != a ) - reporter->Error("bad net address", text); - - Init(uint32(htonl(a))); - } - -NetVal::NetVal(uint32 addr) : AddrVal(TYPE_NET) - { - Init(addr); - } - -#ifdef BROv6 -NetVal::NetVal(const uint32* addr) : AddrVal(TYPE_NET) - { - Init(addr); - } -#endif - -Val* NetVal::SizeVal() const - { - uint32 addr; - -#ifdef BROv6 - if ( ! is_v4_addr(val.addr_val) ) - { - Error("|net| for IPv6 addresses not supported"); - return new Val(0.0, TYPE_DOUBLE); - } - - addr = to_v4_addr(val.addr_val); -#else - addr = val.addr_val; -#endif - addr = ntohl(addr); - - if ( (addr & 0xFFFFFFFF) == 0L ) - return new Val(4294967296.0, TYPE_DOUBLE); - - if ( (addr & 0x00FFFFFF) == 0L ) - return new Val(double(0xFFFFFF + 1), TYPE_DOUBLE); - - if ( (addr & 0x0000FFFF) == 0L ) - return new Val(double(0xFFFF + 1), TYPE_DOUBLE); - - if ( (addr & 0x000000FF) == 0L ) - return new Val(double(0xFF + 1), TYPE_DOUBLE); - - return new Val(1.0, TYPE_DOUBLE); - } - -void NetVal::ValDescribe(ODesc* d) const - { -#ifdef BROv6 - d->Add(dotted_net6(val.addr_val)); -#else - d->Add(dotted_net(val.addr_val)); -#endif - } - -IMPLEMENT_SERIAL(NetVal, SER_NET_VAL); - -bool NetVal::DoSerialize(SerialInfo* info) const - { - DO_SERIALIZE(SER_NET_VAL, AddrVal); - return true; - } - -bool NetVal::DoUnserialize(UnserialInfo* info) - { - DO_UNSERIALIZE(AddrVal); - return true; - } - SubNetVal::SubNetVal(const char* text) : Val(TYPE_SUBNET) { const char* sep = strchr(text, '/'); diff --git a/src/Val.h b/src/Val.h index 76edea5429..c9f780a738 100644 --- a/src/Val.h +++ b/src/Val.h @@ -30,7 +30,6 @@ class SerialInfo; class PortVal; class AddrVal; -class NetVal; class SubNetVal; class IntervalVal; @@ -244,7 +243,7 @@ public: // ... 
in network byte order const addr_type AsAddr() const { - if ( type->Tag() != TYPE_ADDR && type->Tag() != TYPE_NET ) + if ( type->Tag() != TYPE_ADDR ) BadTag("Val::AsAddr", type_name(type->Tag())); return val.addr_val; } @@ -284,7 +283,6 @@ public: CONVERTER(TYPE_PATTERN, PatternVal*, AsPatternVal) CONVERTER(TYPE_PORT, PortVal*, AsPortVal) - CONVERTER(TYPE_NET, NetVal*, AsNetVal) CONVERTER(TYPE_SUBNET, SubNetVal*, AsSubNetVal) CONVERTER(TYPE_TABLE, TableVal*, AsTableVal) CONVERTER(TYPE_RECORD, RecordVal*, AsRecordVal) @@ -302,7 +300,6 @@ public: CONST_CONVERTER(TYPE_PATTERN, PatternVal*, AsPatternVal) CONST_CONVERTER(TYPE_PORT, PortVal*, AsPortVal) - CONST_CONVERTER(TYPE_NET, NetVal*, AsNetVal) CONST_CONVERTER(TYPE_SUBNET, SubNetVal*, AsSubNetVal) CONST_CONVERTER(TYPE_TABLE, TableVal*, AsTableVal) CONST_CONVERTER(TYPE_RECORD, RecordVal*, AsRecordVal) @@ -575,23 +572,6 @@ protected: DECLARE_SERIAL(AddrVal); }; -class NetVal : public AddrVal { -public: - NetVal(const char* text); - NetVal(uint32 addr); - NetVal(const uint32* addr); - - Val* SizeVal() const; - -protected: - friend class Val; - NetVal() {} - - void ValDescribe(ODesc* d) const; - - DECLARE_SERIAL(NetVal); -}; - class SubNetVal : public Val { public: SubNetVal(const char* text); diff --git a/src/bif_type.def b/src/bif_type.def index 84179be1c3..e9bf22eafc 100644 --- a/src/bif_type.def +++ b/src/bif_type.def @@ -12,7 +12,6 @@ DEFINE_BIF_TYPE(TYPE_DOUBLE, "double", "double", "double", "%s->AsDouble()", DEFINE_BIF_TYPE(TYPE_FILE, "file", "file", "BroFile*", "%s->AsFile()", "new Val(%s)") DEFINE_BIF_TYPE(TYPE_INT, "int", "int", "bro_int_t", "%s->AsInt()", "new Val(%s, TYPE_BOOL)") DEFINE_BIF_TYPE(TYPE_INTERVAL, "interval", "interval", "double", "%s->AsInterval()", "new IntervalVal(%s, Seconds)") -DEFINE_BIF_TYPE(TYPE_NET, "net", "net", "addr_type", "%s->AsAddr()", "new NetVal(%s)") DEFINE_BIF_TYPE(TYPE_PACKET, "packet", "packet", "TCP_TracePacket*", "%s->AsRecordVal()->GetOrigin()", "%s->PacketVal()") DEFINE_BIF_TYPE(TYPE_PATTERN, "pattern", "pattern", "RE_Matcher*", "%s->AsPattern()", "new PatternVal(%s)") // DEFINE_BIF_TYPE(TYPE_PORT, "port", "port", "uint32", "%s->AsPortVal()->Port()", "incomplete data") diff --git a/src/bro.bif b/src/bro.bif index 76317f5dd4..e4d0f2092b 100644 --- a/src/bro.bif +++ b/src/bro.bif @@ -588,53 +588,6 @@ function raw_bytes_to_v4_addr%(b: string%): addr return new AddrVal(htonl(a)); %} -function to_net%(a: addr%): net - %{ -#ifdef BROv6 - if ( ! is_v4_addr(a) ) - { - builtin_error("conversion of non-IPv4 address to net", @ARG@[0]); - return new NetVal(uint32(0)); - } - - uint32 addr = to_v4_addr(a); -#else - uint32 addr = a; -#endif - - addr = htonl(addr_to_net(ntohl(addr))); - - return new NetVal(addr); - %} - -function net_to_subnet%(a: net%): subnet - %{ -#ifdef BROv6 - if ( ! 
is_v4_addr(a) ) - { - builtin_error("conversion of non-IPv4 address to subnet", @ARG@[0]); - return new SubNetVal(uint32(0), 0); - } - - uint32 addr = to_v4_addr(a); -#else - uint32 addr = a; -#endif - - switch ( addr_to_class(ntohl(addr)) ) { - case 'A': - return new SubNetVal(addr, 8); - case 'B': - return new SubNetVal(addr, 16); - case 'C': - case 'D': - return new SubNetVal(addr, 24); - - default: - return new SubNetVal(addr, 0); - } - %} - function to_port%(num: count, proto: transport_proto%): port %{ return new PortVal(num, (TransportProto)proto->AsEnum()); @@ -1740,19 +1693,6 @@ function preserve_subnet%(a: subnet%): any return 0; %} -function preserve_net%(a: net%): any - %{ -#ifdef BROv6 - builtin_error("preserve_net() not supported with --enable-BROv6"); -#else - AnonymizeIPAddr* ip_anon = ip_anonymizer[PREFIX_PRESERVING_A50]; - if ( ip_anon ) - ip_anon->PreserveNet(a); -#endif - - return 0; - %} - # Anonymize given IP address. function anonymize_addr%(a: addr, cl: IPAddrAnonymizationClass%): addr %{ diff --git a/src/main.cc b/src/main.cc index f1b393310b..100305d811 100644 --- a/src/main.cc +++ b/src/main.cc @@ -688,6 +688,7 @@ int main(int argc, char** argv) if ( optind == argc && read_files.length() == 0 && flow_files.length() == 0 && + interfaces.length() == 0 && ! (id_name || bst_file) && ! command_line_policy ) add_input_file("-"); diff --git a/src/parse.y b/src/parse.y index 7964fa1bcc..2eb84680b7 100644 --- a/src/parse.y +++ b/src/parse.y @@ -11,7 +11,7 @@ %token TOK_CONSTANT TOK_COPY TOK_COUNT TOK_COUNTER TOK_DEFAULT TOK_DELETE %token TOK_DOUBLE TOK_ELSE TOK_ENUM TOK_EVENT TOK_EXPORT TOK_FILE TOK_FOR %token TOK_FUNCTION TOK_GLOBAL TOK_ID TOK_IF TOK_INT -%token TOK_INTERVAL TOK_LIST TOK_LOCAL TOK_MODULE TOK_MATCH TOK_NET +%token TOK_INTERVAL TOK_LIST TOK_LOCAL TOK_MODULE TOK_MATCH %token TOK_NEXT TOK_OF TOK_PATTERN TOK_PATTERN_TEXT %token TOK_PORT TOK_PRINT TOK_RECORD TOK_REDEF %token TOK_REMOVE_FROM TOK_RETURN TOK_SCHEDULE TOK_SET @@ -787,11 +787,6 @@ type: $$ = base_type(TYPE_ADDR); } - | TOK_NET { - set_location(@1); - $$ = base_type(TYPE_NET); - } - | TOK_SUBNET { set_location(@1); $$ = base_type(TYPE_SUBNET); diff --git a/src/scan.l b/src/scan.l index bbac148a10..cdeedbf038 100644 --- a/src/scan.l +++ b/src/scan.l @@ -265,7 +265,6 @@ list return TOK_LIST; local return TOK_LOCAL; match return TOK_MATCH; module return TOK_MODULE; -net return TOK_NET; next return TOK_NEXT; of return TOK_OF; pattern return TOK_PATTERN; @@ -439,8 +438,6 @@ F RET_CONST(new Val(false, TYPE_BOOL)) RET_CONST(new PortVal(p, TRANSPORT_UNKNOWN)) } -{D}"."{D}"." 
RET_CONST(new NetVal(yytext)) -({D}"."){2}{D} RET_CONST(new NetVal(yytext)) ({D}"."){3}{D} RET_CONST(new AddrVal(yytext)) ({HEX}:){7}{HEX} RET_CONST(new AddrVal(yytext)) diff --git a/testing/btest/Baseline/istate.persistence/vars.read.log b/testing/btest/Baseline/istate.persistence/vars.read.log index a141deb9a8..f52791dfb1 100644 --- a/testing/btest/Baseline/istate.persistence/vars.read.log +++ b/testing/btest/Baseline/istate.persistence/vars.read.log @@ -4,7 +4,6 @@ Hallihallo 1.2.3.4 1.2.0.0/16 3.14 -131.159 42.0 secs { [2] = uiop, diff --git a/testing/btest/Baseline/istate.persistence/vars.write.log b/testing/btest/Baseline/istate.persistence/vars.write.log index a141deb9a8..f52791dfb1 100644 --- a/testing/btest/Baseline/istate.persistence/vars.write.log +++ b/testing/btest/Baseline/istate.persistence/vars.write.log @@ -4,7 +4,6 @@ Hallihallo 1.2.3.4 1.2.0.0/16 3.14 -131.159 42.0 secs { [2] = uiop, diff --git a/testing/btest/Baseline/istate.pybroccoli/bro..stdout b/testing/btest/Baseline/istate.pybroccoli/bro..stdout index c1e12c1b89..1e70711932 100644 --- a/testing/btest/Baseline/istate.pybroccoli/bro..stdout +++ b/testing/btest/Baseline/istate.pybroccoli/bro..stdout @@ -1,14 +1,13 @@ ==== atomic -10 2 -1311279327.7675 +1313624487.48817 2.0 mins F 1.5 Servus 5555/tcp 6.7.6.5 -0.0 192.168.0.0/16 ==== record [a=42, b=6.6.7.7] diff --git a/testing/btest/Baseline/istate.pybroccoli/python..stdout.filtered b/testing/btest/Baseline/istate.pybroccoli/python..stdout.filtered index 65d10201d5..864a4eb627 100644 --- a/testing/btest/Baseline/istate.pybroccoli/python..stdout.filtered +++ b/testing/btest/Baseline/istate.pybroccoli/python..stdout.filtered @@ -1,38 +1,35 @@ ==== atomic a 1 ==== -4L -4 42 42 -1311279327.7680 +1313624487.4889 60.0 True True 3.14 'Hurz' Hurz '12345/udp' 12345/udp '1.2.3.4' 1.2.3.4 -'X.X.X' X.X.X '22.33.44.0/24' 22.33.44.0/24 ==== atomic a 2 ==== -10L -10 2 2 -1311279327.7675 +1313624487.4882 120.0 False False 1.5 'Servus' Servus '5555/tcp' 5555/tcp '6.7.6.5' 6.7.6.5 -'X.X.X' X.X.X '192.168.0.0/16' 192.168.0.0/16 ==== atomic b 2 ==== -10L -10 2 - 1311279327.7675 + 1313624487.4882 120.0 False False 1.5 'Servus' Servus 5555/tcp 6.7.6.5 - X.X.X 192.168.0.0/16 ==== record 1 ==== diff --git a/testing/btest/Baseline/istate.sync/receiver.vars.log b/testing/btest/Baseline/istate.sync/receiver.vars.log index 124c162579..b28cfbd5c9 100644 --- a/testing/btest/Baseline/istate.sync/receiver.vars.log +++ b/testing/btest/Baseline/istate.sync/receiver.vars.log @@ -4,7 +4,6 @@ Jodel 4.3.2.1 4.0.0.0/8 21.0 -192.150.186 42.0 secs { [1] = asdfg2, diff --git a/testing/btest/Baseline/istate.sync/sender.vars.log b/testing/btest/Baseline/istate.sync/sender.vars.log index 124c162579..b28cfbd5c9 100644 --- a/testing/btest/Baseline/istate.sync/sender.vars.log +++ b/testing/btest/Baseline/istate.sync/sender.vars.log @@ -4,7 +4,6 @@ Jodel 4.3.2.1 4.0.0.0/8 21.0 -192.150.186 42.0 secs { [1] = asdfg2, diff --git a/testing/btest/Baseline/language.sizeof/output b/testing/btest/Baseline/language.sizeof/output index 0c1f3448c6..737a999292 100644 --- a/testing/btest/Baseline/language.sizeof/output +++ b/testing/btest/Baseline/language.sizeof/output @@ -7,7 +7,6 @@ File 21.000000 Function add_interface: 2 Integer -10: 10 Interval -5.0 secs: 5.000000 -Net 192.168.0: 65536.000000 Port 80/tcp: 65616 Record [i=10, j=, k=]: 3 Set: 3 diff --git a/testing/btest/Baseline/policy.frameworks.logging.remote-types/receiver.test.log b/testing/btest/Baseline/policy.frameworks.logging.remote-types/receiver.test.log index 
865786eb2f..eee707f89f 100644 --- a/testing/btest/Baseline/policy.frameworks.logging.remote-types/receiver.test.log +++ b/testing/btest/Baseline/policy.frameworks.logging.remote-types/receiver.test.log @@ -1,2 +1,2 @@ -# b i e c p sn n a d t iv s sc ss se vc ve -T -42 Test::TEST 21 123 10.0.0.0/24 10.0.0.0 1.2.3.4 3.14 1312565184.899030 100.0 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY +# b i e c p sn a d t iv s sc ss se vc ve +T -42 Test::TEST 21 123 10.0.0.0/24 1.2.3.4 3.14 1313623666.027768 100.0 hurz 2,4,1,3 CC,AA,BB EMPTY 10,20,30 EMPTY diff --git a/testing/btest/Baseline/policy.frameworks.logging.types/ssh.log b/testing/btest/Baseline/policy.frameworks.logging.types/ssh.log index 5666db73c6..e5633026b2 100644 Binary files a/testing/btest/Baseline/policy.frameworks.logging.types/ssh.log and b/testing/btest/Baseline/policy.frameworks.logging.types/ssh.log differ diff --git a/testing/btest/Baseline/policy.frameworks.metrics.cluster-intermediate-update/manager-1.notice.log b/testing/btest/Baseline/policy.frameworks.metrics.cluster-intermediate-update/manager-1.notice.log new file mode 100644 index 0000000000..48c74fe7c4 --- /dev/null +++ b/testing/btest/Baseline/policy.frameworks.metrics.cluster-intermediate-update/manager-1.notice.log @@ -0,0 +1,2 @@ +# ts uid id.orig_h id.orig_p id.resp_h id.resp_p note msg sub src dst p n peer_descr actions policy_items dropped remote_location.country_code remote_location.region remote_location.city remote_location.latitude remote_location.longitude metric_index.host metric_index.str metric_index.network +1313897486.017657 - - - - - Test_Notice Threshold crossed by metric_index(host=1.2.3.4) 100/100 - 1.2.3.4 - - 100 manager-1 Notice::ACTION_LOG 4 - - - - - - 1.2.3.4 - - diff --git a/testing/btest/Baseline/policy.frameworks.metrics.notice/notice.log b/testing/btest/Baseline/policy.frameworks.metrics.notice/notice.log index 112fe69e4b..1e0e6a572b 100644 --- a/testing/btest/Baseline/policy.frameworks.metrics.notice/notice.log +++ b/testing/btest/Baseline/policy.frameworks.metrics.notice/notice.log @@ -1,4 +1,3 @@ # ts uid id.orig_h id.orig_p id.resp_h id.resp_p note msg sub src dst p n peer_descr actions policy_items dropped remote_location.country_code remote_location.region remote_location.city remote_location.latitude remote_location.longitude metric_index.host metric_index.str metric_index.network -1313432466.662314 - - - - - Test_Notice Metrics threshold crossed by 6.5.4.3 2/1 - 6.5.4.3 - - 2 bro Notice::ACTION_LOG 4 - - - - - - 6.5.4.3 - - -1313432466.662314 - - - - - Test_Notice Metrics threshold crossed by 1.2.3.4 3/1 - 1.2.3.4 - - 3 bro Notice::ACTION_LOG 4 - - - - - - 1.2.3.4 - - -1313432466.662314 - - - - - Test_Notice Metrics threshold crossed by 7.2.1.5 1/1 - 7.2.1.5 - - 1 bro Notice::ACTION_LOG 4 - - - - - - 7.2.1.5 - - +1313685819.326521 - - - - - Test_Notice Threshold crossed by metric_index(host=1.2.3.4) 3/2 - 1.2.3.4 - - 3 bro Notice::ACTION_LOG 4 - - - - - - 1.2.3.4 - - +1313685819.326521 - - - - - Test_Notice Threshold crossed by metric_index(host=6.5.4.3) 2/2 - 6.5.4.3 - - 2 bro Notice::ACTION_LOG 4 - - - - - - 6.5.4.3 - - diff --git a/testing/btest/istate/persistence.bro b/testing/btest/istate/persistence.bro index 7b078baefb..ea2a5368ba 100644 --- a/testing/btest/istate/persistence.bro +++ b/testing/btest/istate/persistence.bro @@ -18,7 +18,6 @@ event bro_done() print out, foo4; print out, foo5; print out, foo6; - print out, foo7; print out, foo8; print out, foo9; print out, foo10; @@ -43,7 +42,6 @@ global foo3: string &persistent 
&synchronized; global foo4: addr &persistent &synchronized; global foo5: subnet &persistent &synchronized; global foo6: double &persistent &synchronized; -global foo7: net &persistent &synchronized; global foo8: interval &persistent &synchronized; global foo9: table[count] of string &persistent &synchronized; global foo10: file &persistent &synchronized; @@ -79,7 +77,6 @@ global foo3 = "Hallihallo" &persistent &synchronized; global foo4 = 1.2.3.4 &persistent &synchronized; global foo5 = 1.2.0.0/16 &persistent &synchronized; global foo6 = 3.14 &persistent &synchronized; -global foo7 = 131.159. &persistent &synchronized; global foo8 = 42 secs &persistent &synchronized; global foo9 = { [1] = "qwerty", [2] = "uiop" } &persistent &synchronized; global foo10 = open("test") &persistent &synchronized; diff --git a/testing/btest/istate/sync.bro b/testing/btest/istate/sync.bro index fcd4c0cfb3..567bbf2af1 100644 --- a/testing/btest/istate/sync.bro +++ b/testing/btest/istate/sync.bro @@ -17,7 +17,6 @@ global foo3 = "Hallihallo" &persistent &synchronized; global foo4 = 1.2.3.4 &persistent &synchronized; global foo5 = 1.2.0.0/16 &persistent &synchronized; global foo6 = 3.14 &persistent &synchronized; -global foo7 = 131.159. &persistent &synchronized; global foo8 = 42 secs &persistent &synchronized; global foo9 = { [1] = "qwerty", [2] = "uiop" } &persistent &synchronized; global foo10 = open("test") &persistent &synchronized; @@ -60,7 +59,6 @@ event bro_done() print out, foo4; print out, foo5; print out, foo6; - print out, foo7; print out, foo8; print out, foo9; print out, foo10; @@ -93,8 +91,6 @@ function modify() foo6 = 21; - foo7 = 192.150.186; - foo9[3] = "asdfg1"; foo9[1] = "asdfg2"; delete foo9[2]; diff --git a/testing/btest/language/sizeof.bro b/testing/btest/language/sizeof.bro index 7db78212ad..3457f895c9 100644 --- a/testing/btest/language/sizeof.bro +++ b/testing/btest/language/sizeof.bro @@ -26,7 +26,6 @@ global d: double = -1.23; global f: file = open_log_file("sizeof_demo"); global i: int = -10; global iv: interval = -5sec; -global n: net = 192.168.; global p: port = 80/tcp; global r: example_record [ $i = 10 ]; global si: set[int]; @@ -82,10 +81,6 @@ print fmt("Integer %s: %d", i, |i|); # Size of interval: returns double representation of the interval print fmt("Interval %s: %f", iv, |iv|); -# Size of net: returns size of class N network as a double -# (so that 2^32 can be expressed too). -print fmt("Net %s: %f", n, |n|); - # Size of port: returns port number as a count. 
print fmt("Port %s: %d", p, |p|); diff --git a/testing/btest/policy/frameworks/logging/remote-types.bro b/testing/btest/policy/frameworks/logging/remote-types.bro index 1e60ce70af..08960eba86 100644 --- a/testing/btest/policy/frameworks/logging/remote-types.bro +++ b/testing/btest/policy/frameworks/logging/remote-types.bro @@ -24,7 +24,6 @@ export { c: count; p: port; sn: subnet; - n: net; a: addr; d: double; t: time; @@ -63,7 +62,6 @@ event remote_connection_handshake_done(p: event_peer) $c=21, $p=123/tcp, $sn=10.0.0.1/24, - $n=10.0., $a=1.2.3.4, $d=3.14, $t=network_time(), diff --git a/testing/btest/policy/frameworks/logging/types.bro b/testing/btest/policy/frameworks/logging/types.bro index 2c4d52f34e..9e299c643a 100644 --- a/testing/btest/policy/frameworks/logging/types.bro +++ b/testing/btest/policy/frameworks/logging/types.bro @@ -18,7 +18,6 @@ export { c: count; p: port; sn: subnet; - n: net; a: addr; d: double; t: time; @@ -55,7 +54,6 @@ event bro_init() $c=21, $p=123/tcp, $sn=10.0.0.1/24, - $n=10.0., $a=1.2.3.4, $d=3.14, $t=network_time(), diff --git a/testing/btest/policy/frameworks/metrics/basic-cluster.bro b/testing/btest/policy/frameworks/metrics/basic-cluster.bro index eda41c3759..75dd7b762d 100644 --- a/testing/btest/policy/frameworks/metrics/basic-cluster.bro +++ b/testing/btest/policy/frameworks/metrics/basic-cluster.bro @@ -24,15 +24,11 @@ event bro_init() &priority=5 Metrics::add_filter(TEST_METRIC, [$name="foo-bar", $break_interval=3secs]); + + if ( Cluster::local_node_type() == Cluster::WORKER ) + { + Metrics::add_data(TEST_METRIC, [$host=1.2.3.4], 3); + Metrics::add_data(TEST_METRIC, [$host=6.5.4.3], 2); + Metrics::add_data(TEST_METRIC, [$host=7.2.1.5], 1); + } } - -@if ( Cluster::local_node_type() == Cluster::WORKER ) - -event bro_init() - { - Metrics::add_data(TEST_METRIC, [$host=1.2.3.4], 3); - Metrics::add_data(TEST_METRIC, [$host=6.5.4.3], 2); - Metrics::add_data(TEST_METRIC, [$host=7.2.1.5], 1); - } - -@endif \ No newline at end of file diff --git a/testing/btest/policy/frameworks/metrics/cluster-intermediate-update.bro b/testing/btest/policy/frameworks/metrics/cluster-intermediate-update.bro new file mode 100644 index 0000000000..519de35805 --- /dev/null +++ b/testing/btest/policy/frameworks/metrics/cluster-intermediate-update.bro @@ -0,0 +1,54 @@ +# @TEST-EXEC: btest-bg-run manager-1 BROPATH=$BROPATH:.. CLUSTER_NODE=manager-1 bro %INPUT +# @TEST-EXEC: btest-bg-run proxy-1 BROPATH=$BROPATH:.. CLUSTER_NODE=proxy-1 bro %INPUT +# @TEST-EXEC: sleep 1 +# @TEST-EXEC: btest-bg-run worker-1 BROPATH=$BROPATH:.. CLUSTER_NODE=worker-1 bro %INPUT +# @TEST-EXEC: btest-bg-run worker-2 BROPATH=$BROPATH:.. 
CLUSTER_NODE=worker-2 bro %INPUT +# @TEST-EXEC: btest-bg-wait -k 5 +# @TEST-EXEC: btest-diff manager-1/notice.log + +@TEST-START-FILE cluster-layout.bro +redef Cluster::nodes = { + ["manager-1"] = [$node_type=Cluster::MANAGER, $ip=127.0.0.1, $p=37757/tcp, $workers=set("worker-1")], + ["proxy-1"] = [$node_type=Cluster::PROXY, $ip=127.0.0.1, $p=37758/tcp, $manager="manager-1", $workers=set("worker-1")], + ["worker-1"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=37760/tcp, $manager="manager-1", $proxy="proxy-1", $interface="eth0"], + ["worker-2"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=37761/tcp, $manager="manager-1", $proxy="proxy-1", $interface="eth1"], +}; +@TEST-END-FILE + +redef enum Notice::Type += { + Test_Notice, +}; + +redef enum Metrics::ID += { + TEST_METRIC, +}; + +event bro_init() &priority=5 + { + Metrics::add_filter(TEST_METRIC, + [$name="foo-bar", + $break_interval=1hr, + $note=Test_Notice, + $notice_threshold=100, + $log=T]); + } + +@if ( Cluster::local_node_type() == Cluster::WORKER ) + +event do_metrics(i: count) + { + # Worker-1 will trigger an intermediate update and then if everything + # works correctly, the data from worker-2 will hit the threshold and + # should trigger the notice. + Metrics::add_data(TEST_METRIC, [$host=1.2.3.4], i); + } + +event bro_init() + { + if ( Cluster::node == "worker-1" ) + schedule 2sec { do_metrics(99) }; + if ( Cluster::node == "worker-2" ) + event do_metrics(1); + } + +@endif \ No newline at end of file diff --git a/testing/btest/policy/frameworks/metrics/notice.bro b/testing/btest/policy/frameworks/metrics/notice.bro index 3451af18f4..0ac9faa956 100644 --- a/testing/btest/policy/frameworks/metrics/notice.bro +++ b/testing/btest/policy/frameworks/metrics/notice.bro @@ -1,6 +1,7 @@ # @TEST-EXEC: bro %INPUT # @TEST-EXEC: btest-diff notice.log + redef enum Notice::Type += { Test_Notice, }; @@ -15,7 +16,7 @@ event bro_init() &priority=5 [$name="foo-bar", $break_interval=3secs, $note=Test_Notice, - $notice_threshold=1, + $notice_threshold=2, $log=F]); Metrics::add_data(TEST_METRIC, [$host=1.2.3.4], 3); Metrics::add_data(TEST_METRIC, [$host=6.5.4.3], 2);
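[Editor's note: as a closing cross-check against the updated notice.log
baseline earlier in this change: with $notice_threshold=2, per-host totals of
3, 2 and 1 yield notices only for the first two hosts. A sketch of that
comparison; the table literal mirrors the test data.]

    global totals: table[addr] of count = {
    	[1.2.3.4] = 3,
    	[6.5.4.3] = 2,
    	[7.2.1.5] = 1
    };

    event bro_init()
    	{
    	for ( host in totals )
    		if ( totals[host] >= 2 )
    			print fmt("notice for %s (%d/2)", host, totals[host]);
    	}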