diff --git a/scripts/base/frameworks/intel/cluster.bro b/scripts/base/frameworks/intel/cluster.bro
index 7791c334d5..e8fae8327c 100644
--- a/scripts/base/frameworks/intel/cluster.bro
+++ b/scripts/base/frameworks/intel/cluster.bro
@@ -20,16 +20,11 @@ redef have_full_data = F;
 global cluster_new_item: event(item: Item);
 
 # Primary intelligence distribution comes from manager.
-redef Cluster::manager2worker_events += /^Intel::(cluster_new_item)$/;
+redef Cluster::manager2worker_events += /^Intel::(cluster_new_item|purge_item)$/;
 # If a worker finds intelligence and adds it, it should share it back to the manager.
-redef Cluster::worker2manager_events += /^Intel::(cluster_new_item|match_no_items)$/;
+redef Cluster::worker2manager_events += /^Intel::(cluster_new_item|remove_item|match_no_items)$/;
 
 @if ( Cluster::local_node_type() == Cluster::MANAGER )
-event Intel::match_no_items(s: Seen) &priority=5
-	{
-	event Intel::match(s, Intel::get_items(s));
-	}
-
 event remote_connection_handshake_done(p: event_peer)
 	{
 	# When a worker connects, send it the complete minimal data store.
@@ -39,6 +34,17 @@ event remote_connection_handshake_done(p: event_peer)
 		send_id(p, "Intel::min_data_store");
 		}
 	}
+
+event Intel::match_no_items(s: Seen) &priority=5
+	{
+	if ( Intel::find(s) )
+		event Intel::match(s, Intel::get_items(s));
+	}
+
+event Intel::remove_item(item: Item, purge_indicator: bool)
+	{
+	remove(item, purge_indicator);
+	}
 @endif
 
 event Intel::cluster_new_item(item: Intel::Item) &priority=5
diff --git a/scripts/base/frameworks/intel/main.bro b/scripts/base/frameworks/intel/main.bro
index f3bceec25e..8e387f71f6 100644
--- a/scripts/base/frameworks/intel/main.bro
+++ b/scripts/base/frameworks/intel/main.bro
@@ -136,6 +136,10 @@ export {
 	## Intelligence data manipulation function.
 	global insert: function(item: Item);
 
+	## Function to remove intelligence data. If purge_indicator is set, the
+	## given meta data is ignored and the indicator is removed completely.
+	global remove: function(item: Item, purge_indicator: bool &default = F);
+
 	## Function to declare discovery of a piece of data in order to check
 	## it against known intelligence for matches.
 	global seen: function(s: Seen);
@@ -157,6 +161,8 @@ global match_no_items: event(s: Seen);
 
 # Internal events for cluster data distribution.
 global new_item: event(item: Item);
+global remove_item: event(item: Item, purge_indicator: bool);
+global purge_item: event(item: Item);
 
 # Optionally store metadata. This is used internally depending on
 # if this is a cluster deployment or not.
@@ -191,14 +197,16 @@ event bro_init() &priority=5
 
 function find(s: Seen): bool
 	{
+	local ds = have_full_data ? data_store : min_data_store;
+
 	if ( s?$host )
 		{
-		return ((s$host in min_data_store$host_data) ||
-		        (|matching_subnets(addr_to_subnet(s$host), min_data_store$subnet_data)| > 0));
+		return ((s$host in ds$host_data) ||
+		        (|matching_subnets(addr_to_subnet(s$host), ds$subnet_data)| > 0));
 		}
 	else
 		{
-		return ([to_lower(s$indicator), s$indicator_type] in min_data_store$string_data);
+		return ([to_lower(s$indicator), s$indicator_type] in ds$string_data);
 		}
 	}
 
@@ -385,4 +393,83 @@ function insert(item: Item)
 		# or insert was called on a worker
 		event Intel::new_item(item);
 	}
-
+
+# Function to remove meta data of an item. The function returns T
+# if there is no meta data left for the given indicator.
+function remove_meta_data(item: Item): bool
+	{
+	if ( ! have_full_data )
+		{
+		Reporter::warning(fmt("Intel::remove_meta_data was called from a host (%s) that doesn't have the full data.",
+		                      peer_description));
+		return F;
+		}
+
+	switch ( item$indicator_type )
+		{
+		case ADDR:
+			local host = to_addr(item$indicator);
+			delete data_store$host_data[host][item$meta$source];
+			return (|data_store$host_data[host]| == 0);
+		case SUBNET:
+			local net = to_subnet(item$indicator);
+			delete data_store$subnet_data[net][item$meta$source];
+			return (|data_store$subnet_data[net]| == 0);
+		default:
+			delete data_store$string_data[item$indicator, item$indicator_type][item$meta$source];
+			return (|data_store$string_data[item$indicator, item$indicator_type]| == 0);
+		}
+	}
+
+function remove(item: Item, purge_indicator: bool)
+	{
+	# Delegate removal if we are on a worker
+	if ( !have_full_data )
+		{
+		event Intel::remove_item(item, purge_indicator);
+		return;
+		}
+
+	# Remove meta data from manager's data store
+	local no_meta_data = remove_meta_data(item);
+	# Remove whole indicator if necessary
+	if ( no_meta_data || purge_indicator )
+		{
+		switch ( item$indicator_type )
+			{
+			case ADDR:
+				local host = to_addr(item$indicator);
+				delete data_store$host_data[host];
+				break;
+			case SUBNET:
+				local net = to_subnet(item$indicator);
+				delete data_store$subnet_data[net];
+				break;
+			default:
+				delete data_store$string_data[item$indicator, item$indicator_type];
+				break;
+			}
+		# Trigger deletion in min data stores
+		event Intel::purge_item(item);
+		}
+	}
+
+event purge_item(item: Item)
+	{
+	# Remove data from min data store
+	switch ( item$indicator_type )
+		{
+		case ADDR:
+			local host = to_addr(item$indicator);
+			delete min_data_store$host_data[host];
+			break;
+		case SUBNET:
+			local net = to_subnet(item$indicator);
+			delete min_data_store$subnet_data[net];
+			break;
+		default:
+			delete min_data_store$string_data[item$indicator, item$indicator_type];
+			break;
+		}
+	}
+
diff --git a/testing/btest/Baseline/scripts.base.frameworks.intel.remove-item-cluster/manager-1..stdout b/testing/btest/Baseline/scripts.base.frameworks.intel.remove-item-cluster/manager-1..stdout
new file mode 100644
index 0000000000..17862ce14b
--- /dev/null
+++ b/testing/btest/Baseline/scripts.base.frameworks.intel.remove-item-cluster/manager-1..stdout
@@ -0,0 +1,6 @@
+Purging 192.168.0.1.
+Purging 192.168.0.2.
+Removing 192.168.1.2 (source: source1).
+Removing 192.168.1.2 (source: source2).
+Purging 192.168.1.2.
+Logging intel hit!
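Not part of the patch, but as a usage illustration: the new Intel::remove is meant to be called just like the existing Intel::insert. Below is a minimal sketch for a standalone instance (where have_full_data stays T); the indicator 198.51.100.1 and the source names "feed-a"/"feed-b" are made up.

@load base/frameworks/intel

event bro_init()
	{
	# Register one indicator with metadata from two sources.
	Intel::insert([$indicator="198.51.100.1", $indicator_type=Intel::ADDR,
	               $meta=[$source="feed-a"]]);
	Intel::insert([$indicator="198.51.100.1", $indicator_type=Intel::ADDR,
	               $meta=[$source="feed-b"]]);

	# Default removal (purge_indicator=F): only feed-a's metadata is
	# deleted; the indicator stays because feed-b still references it.
	Intel::remove([$indicator="198.51.100.1", $indicator_type=Intel::ADDR,
	               $meta=[$source="feed-a"]]);

	# Forced removal: passing T ignores the remaining metadata and drops
	# the indicator completely.
	Intel::remove([$indicator="198.51.100.1", $indicator_type=Intel::ADDR,
	               $meta=[$source="feed-b"]], T);
	}

On a worker the same call does not touch data_store; it only raises Intel::remove_item so the manager performs the removal and, if the indicator is gone, redistributes the purge via Intel::purge_item.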
diff --git a/testing/btest/Baseline/scripts.base.frameworks.intel.remove-item-cluster/manager-1.intel.log b/testing/btest/Baseline/scripts.base.frameworks.intel.remove-item-cluster/manager-1.intel.log
new file mode 100644
index 0000000000..bb3541ba32
--- /dev/null
+++ b/testing/btest/Baseline/scripts.base.frameworks.intel.remove-item-cluster/manager-1.intel.log
@@ -0,0 +1,10 @@
+#separator \x09
+#set_separator	,
+#empty_field	(empty)
+#unset_field	-
+#path	intel
+#open	2016-03-30-16-01-31
+#fields	ts	uid	id.orig_h	id.orig_p	id.resp_h	id.resp_p	fuid	file_mime_type	file_desc	seen.indicator	seen.indicator_type	seen.where	seen.node	matched	sources
+#types	time	string	addr	port	addr	port	string	string	string	string	enum	enum	string	set[enum]	set[string]
+1459353691.470304	-	-	-	-	-	-	-	-	10.10.10.10	Intel::ADDR	Intel::IN_ANYWHERE	worker-1	Intel::ADDR	end
+#close	2016-03-30-16-01-41
diff --git a/testing/btest/Baseline/scripts.base.frameworks.intel.remove-item-cluster/worker-1..stdout b/testing/btest/Baseline/scripts.base.frameworks.intel.remove-item-cluster/worker-1..stdout
new file mode 100644
index 0000000000..042032cb9d
--- /dev/null
+++ b/testing/btest/Baseline/scripts.base.frameworks.intel.remove-item-cluster/worker-1..stdout
@@ -0,0 +1,5 @@
+Removing 192.168.1.2 (source: source1).
+Removing 192.168.1.2 (source: source2).
+Purging 192.168.0.1.
+Purging 192.168.0.2.
+Purging 192.168.1.2.
diff --git a/testing/btest/scripts/base/frameworks/intel/remove-item-cluster.bro b/testing/btest/scripts/base/frameworks/intel/remove-item-cluster.bro
new file mode 100644
index 0000000000..d13536a015
--- /dev/null
+++ b/testing/btest/scripts/base/frameworks/intel/remove-item-cluster.bro
@@ -0,0 +1,88 @@
+# @TEST-SERIALIZE: comm
+#
+# @TEST-EXEC: btest-bg-run manager-1 BROPATH=$BROPATH:.. CLUSTER_NODE=manager-1 bro %INPUT
+# @TEST-EXEC: btest-bg-run worker-1 BROPATH=$BROPATH:.. CLUSTER_NODE=worker-1 bro %INPUT
+# @TEST-EXEC: btest-bg-wait -k 10
+# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-sort btest-diff manager-1/.stdout
+# @TEST-EXEC: TEST_DIFF_CANONIFIER=$SCRIPTS/diff-sort btest-diff worker-1/.stdout
+# @TEST-EXEC: btest-diff manager-1/intel.log
+
+# @TEST-START-FILE cluster-layout.bro
+redef Cluster::nodes = {
+	["manager-1"] = [$node_type=Cluster::MANAGER, $ip=127.0.0.1, $p=37757/tcp, $workers=set("worker-1")],
+	["worker-1"] = [$node_type=Cluster::WORKER, $ip=127.0.0.1, $p=37760/tcp, $manager="manager-1"],
+};
+# @TEST-END-FILE
+
+@load base/frameworks/control
+
+module Intel;
+
+redef Log::default_rotation_interval=0sec;
+
+event test_manager()
+	{
+	Intel::remove([$indicator="192.168.0.1", $indicator_type=Intel::ADDR, $meta=[$source="source1"]]);
+	Intel::seen([$host=192.168.0.1, $where=Intel::IN_ANYWHERE]);
+	Intel::remove([$indicator="192.168.0.2", $indicator_type=Intel::ADDR, $meta=[$source="source1"]], T);
+	Intel::seen([$host=192.168.0.2, $where=Intel::IN_ANYWHERE]);
+	}
+
+event test_worker()
+	{
+	Intel::remove([$indicator="192.168.1.2", $indicator_type=Intel::ADDR, $meta=[$source="source1"]]);
+	Intel::remove([$indicator="192.168.1.2", $indicator_type=Intel::ADDR, $meta=[$source="source2"]]);
+	Intel::seen([$host=192.168.1.2, $where=Intel::IN_ANYWHERE]);
+	# Trigger shutdown by matching data that should be present
+	Intel::seen([$host=10.10.10.10, $where=Intel::IN_ANYWHERE]);
+	}
+
+event remote_connection_handshake_done(p: event_peer)
+	{
+	# Insert the data once all workers are connected.
+	if ( Cluster::local_node_type() == Cluster::MANAGER && Cluster::worker_count == 1 )
+		{
+		Intel::insert([$indicator="192.168.0.1", $indicator_type=Intel::ADDR, $meta=[$source="source1"]]);
+		Intel::insert([$indicator="192.168.0.2", $indicator_type=Intel::ADDR, $meta=[$source="source1"]]);
+		Intel::insert([$indicator="192.168.0.2", $indicator_type=Intel::ADDR, $meta=[$source="source2"]]);
+		Intel::insert([$indicator="192.168.1.2", $indicator_type=Intel::ADDR, $meta=[$source="source1"]]);
+		Intel::insert([$indicator="192.168.1.2", $indicator_type=Intel::ADDR, $meta=[$source="source2"]]);
+		Intel::insert([$indicator="10.10.10.10", $indicator_type=Intel::ADDR, $meta=[$source="end"]]);
+
+		event test_manager();
+		}
+	}
+
+global worker_data = 0;
+event Intel::cluster_new_item(item: Intel::Item)
+	{
+	# Run test on worker-1 when all items have been inserted
+	if ( Cluster::node == "worker-1" )
+		{
+		++worker_data;
+		if ( worker_data == 4 )
+			event test_worker();
+		}
+	}
+
+event Intel::remove_item(item: Item, purge_indicator: bool)
+	{
+	print fmt("Removing %s (source: %s).", item$indicator, item$meta$source);
+	}
+
+event purge_item(item: Item)
+	{
+	print fmt("Purging %s.", item$indicator);
+	}
+
+event Intel::log_intel(rec: Intel::Info)
+	{
+	print "Logging intel hit!";
+	event Control::shutdown_request();
+	}
+
+event remote_connection_closed(p: event_peer)
+	{
+	# Cascading termination
+	terminate_communication();
+	}
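Also not part of the patch: to trace the removal flow in a running cluster, one could mirror the handlers the btest above adds for the internal events. The event and record names come from main.bro; the print messages below are arbitrary and only meant for ad-hoc debugging.

module Intel;

# Runs on the manager whenever a worker delegates a removal via
# worker2manager_events.
event Intel::remove_item(item: Item, purge_indicator: bool)
	{
	print fmt("removal requested for %s (source: %s, purge: %s)",
	          item$indicator, item$meta$source, purge_indicator);
	}

# Raised by the manager once an indicator is dropped from the full data
# store and forwarded to workers, which then clear their min_data_store.
event Intel::purge_item(item: Item)
	{
	print fmt("indicator %s purged", item$indicator);
	}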