mirror of https://github.com/zeek/zeek.git

commit 74f96d22ef
Merge remote branch 'origin/master' into topic/bernhard/hyperloglog

Conflicts:
    src/3rdparty

232 changed files with 9163 additions and 148274 deletions
@@ -7,6 +7,10 @@ export {
    ## The prefix where files are extracted to.
    const prefix = "./extract_files/" &redef;

    ## The default max size for extracted files (they won't exceed this
    ## number of bytes). A value of zero means unlimited.
    const default_limit = 0 &redef;

    redef record Files::Info += {
        ## Local filename of the extracted file.
        extracted: string &optional &log;

@@ -17,9 +21,32 @@ export {
        ## This field is used in the core by the extraction plugin
        ## to know where to write the file to. It's also optional.
        extract_filename: string &optional;

        ## The maximum allowed file size in bytes of *extract_filename*.
        ## Once reached, a :bro:see:`file_extraction_limit` event is
        ## raised and the analyzer will be removed unless
        ## :bro:see:`FileExtract::set_limit` is called to increase the
        ## limit. A value of zero means "no limit".
        extract_limit: count &default=default_limit;
    };

    ## Sets the maximum allowed extracted file size.
    ##
    ## f: A file that's being extracted.
    ##
    ## args: Arguments that identify a file extraction analyzer.
    ##
    ## n: Allowed number of bytes to be extracted.
    ##
    ## Returns: false if a file extraction analyzer wasn't active for
    ##          the file, else true.
    global set_limit: function(f: fa_file, args: Files::AnalyzerArgs, n: count): bool;
}

function set_limit(f: fa_file, args: Files::AnalyzerArgs, n: count): bool
    {
    return __set_limit(f$id, args, n);
    }

function on_add(f: fa_file, args: Files::AnalyzerArgs)
    {
    if ( ! args?$extract_filename )

@@ -27,12 +54,10 @@ function on_add(f: fa_file, args: Files::AnalyzerArgs)

    f$info$extracted = args$extract_filename;
    args$extract_filename = build_path_compressed(prefix, args$extract_filename);
    mkdir(prefix);
    }

event bro_init() &priority=10
    {
    Files::register_analyzer_add_callback(Files::ANALYZER_EXTRACT, on_add);

    # Create the extraction directory.
    mkdir(prefix);
    }
}

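For reference, a minimal local-script sketch (not part of this commit) of how the new extraction knobs might be used; the 1 MB limit and the per-file name shown here are illustrative assumptions:

# Hypothetical usage: attach the extract analyzer to every file and cap
# extraction via the extract_limit field added above.
event file_new(f: fa_file)
    {
    local args = Files::AnalyzerArgs($extract_filename=fmt("extract-%s", f$id),
                                     $extract_limit=1024*1024);
    Files::add_analyzer(f, Files::ANALYZER_EXTRACT, args);
    }
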
scripts/base/files/unified2/__load__.bro (new file, 1 line)

@@ -0,0 +1 @@
@load ./main

scripts/base/files/unified2/main.bro (new file, 244 lines)

@@ -0,0 +1,244 @@
@load base/utils/dir
@load base/utils/paths

module Unified2;

export {
    redef enum Log::ID += { LOG };

    ## File to watch for Unified2 records.
    const watch_file = "" &redef;

    ## Directory to watch for Unified2 files.
    const watch_dir = "" &redef;

    ## The sid-msg.map file you would like to use for your alerts.
    const sid_msg = "" &redef;

    ## The gen-msg.map file you would like to use for your alerts.
    const gen_msg = "" &redef;

    ## The classification.config file you would like to use for your alerts.
    const classification_config = "" &redef;

    ## Reconstructed "alert" which combines related events
    ## and packets.
    global alert: event(f: fa_file, ev: Unified2::IDSEvent, pkt: Unified2::Packet);

    type PacketID: record {
        src_ip: addr;
        src_p: port;
        dst_ip: addr;
        dst_p: port;
    } &log;

    type Info: record {
        ## Timestamp attached to the alert.
        ts: time &log;
        ## Addresses and ports for the connection.
        id: PacketID &log;
        ## Sensor that originated this event.
        sensor_id: count &log;
        ## Sig id for this generator.
        signature_id: count &log;
        ## A string representation of the "signature_id" field if a sid_msg.map file was loaded.
        signature: string &log &optional;
        ## Which generator generated the alert?
        generator_id: count &log;
        ## A string representation of the "generator_id" field if a gen_msg.map file was loaded.
        generator: string &log &optional;
        ## Sig revision for this id.
        signature_revision: count &log;
        ## Event classification.
        classification_id: count &log;
        ## Descriptive classification string.
        classification: string &log &optional;
        ## Event priority.
        priority_id: count &log;
        ## Event ID.
        event_id: count &log;
        ## Some of the packet data.
        packet: string &log &optional;
    } &log;

    ## The event for accessing logged records.
    global log_unified2: event(rec: Info);
}

# Mappings for extended information from alerts.
global classification_map: table[count] of string;
global sid_map: table[count] of string;
global gen_map: table[count] of string;

# For reading in config files.
type OneLine: record {
    line: string;
};

function create_info(ev: IDSEvent): Info
    {
    local info = Info($ts=ev$ts,
                      $id=PacketID($src_ip=ev$src_ip, $src_p=ev$src_p,
                                   $dst_ip=ev$dst_ip, $dst_p=ev$dst_p),
                      $sensor_id=ev$sensor_id,
                      $signature_id=ev$signature_id,
                      $generator_id=ev$generator_id,
                      $signature_revision=ev$signature_revision,
                      $classification_id=ev$classification_id,
                      $priority_id=ev$priority_id,
                      $event_id=ev$event_id);

    if ( ev$signature_id in sid_map )
        info$signature=sid_map[ev$signature_id];
    if ( ev$generator_id in gen_map )
        info$generator=gen_map[ev$generator_id];
    if ( ev$classification_id in classification_map )
        info$classification=classification_map[ev$classification_id];

    return info;
    }

redef record fa_file += {
    ## Recently received IDS events. This is primarily used
    ## for tying together Unified2 events and packets.
    u2_events: table[count] of Unified2::IDSEvent
        &optional &create_expire=5sec
        &expire_func=function(t: table[count] of Unified2::IDSEvent, event_id: count): interval
            {
            Log::write(LOG, create_info(t[event_id]));
            return 0secs;
            };
};

event Unified2::read_sid_msg_line(desc: Input::EventDescription, tpe: Input::Event, line: string)
    {
    local parts = split_n(line, / \|\| /, F, 100);
    if ( |parts| >= 2 && /^[0-9]+$/ in parts[1] )
        sid_map[to_count(parts[1])] = parts[2];
    }

event Unified2::read_gen_msg_line(desc: Input::EventDescription, tpe: Input::Event, line: string)
    {
    local parts = split_n(line, / \|\| /, F, 3);
    if ( |parts| >= 2 && /^[0-9]+$/ in parts[1] )
        gen_map[to_count(parts[1])] = parts[3];
    }

event Unified2::read_classification_line(desc: Input::EventDescription, tpe: Input::Event, line: string)
    {
    local parts = split_n(line, /: /, F, 2);
    if ( |parts| == 2 )
        {
        local parts2 = split_n(parts[2], /,/, F, 4);
        if ( |parts2| > 1 )
            classification_map[|classification_map|+1] = parts2[1];
        }
    }

event bro_init() &priority=5
    {
    Log::create_stream(Unified2::LOG, [$columns=Info, $ev=log_unified2]);

    if ( sid_msg != "" )
        {
        Input::add_event([$source=sid_msg,
                          $reader=Input::READER_RAW,
                          $mode=Input::REREAD,
                          $name=sid_msg,
                          $fields=Unified2::OneLine,
                          $want_record=F,
                          $ev=Unified2::read_sid_msg_line]);
        }

    if ( gen_msg != "" )
        {
        Input::add_event([$source=gen_msg,
                          $name=gen_msg,
                          $reader=Input::READER_RAW,
                          $mode=Input::REREAD,
                          $fields=Unified2::OneLine,
                          $want_record=F,
                          $ev=Unified2::read_gen_msg_line]);
        }

    if ( classification_config != "" )
        {
        Input::add_event([$source=classification_config,
                          $name=classification_config,
                          $reader=Input::READER_RAW,
                          $mode=Input::REREAD,
                          $fields=Unified2::OneLine,
                          $want_record=F,
                          $ev=Unified2::read_classification_line]);
        }

    if ( watch_dir != "" )
        {
        Dir::monitor(watch_dir, function(fname: string)
            {
            Input::add_analysis([$source=fname,
                                 $reader=Input::READER_BINARY,
                                 $mode=Input::STREAM,
                                 $name=fname]);
            }, 10secs);
        }

    if ( watch_file != "" )
        {
        Input::add_analysis([$source=watch_file,
                             $reader=Input::READER_BINARY,
                             $mode=Input::STREAM,
                             $name=watch_file]);
        }
    }

event file_new(f: fa_file)
    {
    local file_dir = "";
    local parts = split_all(f$source, /\/[^\/]*$/);
    if ( |parts| == 3 )
        file_dir = parts[1];

    if ( (watch_file != "" && f$source == watch_file) ||
         (watch_dir != "" && compress_path(watch_dir) == file_dir) )
        {
        Files::add_analyzer(f, Files::ANALYZER_UNIFIED2);
        f$u2_events = table();
        }
    }

event unified2_event(f: fa_file, ev: Unified2::IDSEvent)
    {
    f$u2_events[ev$event_id] = ev;
    }

event unified2_packet(f: fa_file, pkt: Unified2::Packet)
    {
    if ( f?$u2_events && pkt$event_id in f$u2_events )
        {
        local ev = f$u2_events[pkt$event_id];
        event Unified2::alert(f, ev, pkt);
        delete f$u2_events[pkt$event_id];
        }
    }

event Unified2::alert(f: fa_file, ev: IDSEvent, pkt: Packet)
    {
    local info = create_info(ev);
    info$packet=pkt$data;
    Log::write(LOG, info);
    }

event file_state_remove(f: fa_file)
    {
    if ( f?$u2_events )
        {
        # In case any events never had matching packets, flush
        # the extras to the log.
        for ( i in f$u2_events )
            {
            Log::write(LOG, create_info(f$u2_events[i]));
            }
        }
    }

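A minimal deployment sketch (not part of this commit), assuming a Snort-style spool directory and sid-msg.map location; both paths are illustrative:

# Hypothetical site configuration for the new Unified2 support.
redef Unified2::watch_dir = "/var/spool/snort";
redef Unified2::sid_msg = "/etc/snort/sid-msg.map";

event Unified2::alert(f: fa_file, ev: Unified2::IDSEvent, pkt: Unified2::Packet)
    {
    print fmt("IDS alert: sid=%d sensor=%d %s:%s -> %s:%s",
              ev$signature_id, ev$sensor_id,
              ev$src_ip, ev$src_p, ev$dst_ip, ev$dst_p);
    }
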
@@ -27,6 +27,9 @@ export {
        ## File hash which is non-hash type specific. It's up to the user to query
        ## for any relevant hash types.
        FILE_HASH,
        ## File names. Typically seen in protocols with definite indications
        ## of a file name.
        FILE_NAME,
        ## Certificate SHA-1 hash.
        CERT_HASH,
    };

@@ -80,6 +83,10 @@ export {
        ## If the data was discovered within a connection, the
        ## connection record should go here to give context to the data.
        conn: connection &optional;

        ## If the data was discovered within a file, the file record
        ## should go here to provide context to the data.
        f: fa_file &optional;
    };

    ## Record used for the logging framework representing a positive

@@ -95,6 +102,16 @@ export {
        ## this is the conn_id for the connection.
        id: conn_id &log &optional;

        ## If a file was associated with this intelligence hit,
        ## this is the uid for the file.
        fuid: string &log &optional;
        ## A mime type if the intelligence hit is related to a file.
        ## If the $f field is provided this will be automatically filled out.
        file_mime_type: string &log &optional;
        ## Frequently files can be "described" to give a bit more context.
        ## If the $f field is provided this field will be automatically filled out.
        file_desc: string &log &optional;

        ## Where the data was seen.
        seen: Seen &log;
        ## Sources which supplied data that resulted in this match.

@@ -248,7 +265,25 @@ function has_meta(check: MetaData, metas: set[MetaData]): bool

event Intel::match(s: Seen, items: set[Item]) &priority=5
    {
    local info: Info = [$ts=network_time(), $seen=s];
    local info = Info($ts=network_time(), $seen=s);

    if ( s?$f )
        {
        if ( s$f?$conns && |s$f$conns| == 1 )
            {
            for ( cid in s$f$conns )
                s$conn = s$f$conns[cid];
            }

        if ( ! info?$fuid )
            info$fuid = s$f$id;

        if ( ! info?$file_mime_type && s$f?$mime_type )
            info$file_mime_type = s$f$mime_type;

        if ( ! info?$file_desc )
            info$file_desc = Files::describe(s$f);
        }

    if ( s?$conn )
        {

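A sketch (not part of this commit) of feeding file context into the intelligence framework via the new $f field; the Files::IN_HASH where-value is assumed from the intel "seen" policy scripts and may differ in a given installation:

# Hypothetical example: report file hashes with file context so that matches
# get fuid, mime type, and description filled in automatically.
event file_hash(f: fa_file, kind: string, hash: string)
    {
    Intel::seen(Intel::Seen($indicator=hash,
                            $indicator_type=Intel::FILE_HASH,
                            $f=f,
                            $where=Files::IN_HASH));
    }
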
@@ -60,7 +60,7 @@ export {

# Add events to the cluster framework to make this work.
redef Cluster::manager2worker_events += /SumStats::cluster_(ss_request|get_result|threshold_crossed)/;
redef Cluster::manager2worker_events += /SumStats::(thresholds_reset|get_a_key)/;
redef Cluster::manager2worker_events += /SumStats::(get_a_key)/;
redef Cluster::worker2manager_events += /SumStats::cluster_(ss_response|send_result|key_intermediate_response)/;
redef Cluster::worker2manager_events += /SumStats::(send_a_key|send_no_key)/;

@@ -95,37 +95,6 @@ function data_added(ss: SumStat, key: Key, result: Result)
        }
    }

#event SumStats::send_data(uid: string, ss_name: string, cleanup: bool)
#    {
#    #print fmt("WORKER %s: sending data for uid %s...", Cluster::node, uid);
#
#    local local_data: ResultTable = table();
#    local incoming_data: ResultTable = cleanup ? data : copy(data);
#
#    local num_added = 0;
#    for ( key in incoming_data )
#        {
#        local_data[key] = incoming_data[key];
#        delete incoming_data[key];
#
#        # Only send cluster_send_in_groups_of at a time. Queue another
#        # event to send the next group.
#        if ( cluster_send_in_groups_of == ++num_added )
#            break;
#        }
#
#    local done = F;
#    # If data is empty, this sumstat is done.
#    if ( |incoming_data| == 0 )
#        done = T;
#
#    # Note: copy is needed to compensate serialization caching issue. This should be
#    # changed to something else later.
#    event SumStats::cluster_ss_response(uid, ss_name, copy(local_data), done, cleanup);
#    if ( ! done )
#        schedule 0.01 sec { SumStats::send_data(uid, T) };
#    }

event SumStats::get_a_key(uid: string, ss_name: string, cleanup: bool)
    {
    if ( uid in sending_results )

@@ -204,6 +173,8 @@ event SumStats::cluster_get_result(uid: string, ss_name: string, key: Key, clean
    {
    if ( ss_name in result_store && key in result_store[ss_name] )
        {
        # Note: copy is needed to compensate serialization caching issue. This should be
        # changed to something else later.
        event SumStats::cluster_send_result(uid, ss_name, key, copy(result_store[ss_name][key]), cleanup);
        }
    else

@@ -223,11 +194,6 @@ event SumStats::cluster_threshold_crossed(ss_name: string, key: SumStats::Key, t
    threshold_tracker[ss_name][key] = thold_index;
    }

event SumStats::thresholds_reset(ss_name: string)
    {
    delete threshold_tracker[ss_name];
    }

@endif

@@ -236,7 +202,12 @@ event SumStats::thresholds_reset(ss_name: string)
# This variable is maintained by manager nodes as they collect and aggregate
# results.
# Index on a uid.
global stats_keys: table[string] of set[Key] &create_expire=1min;
global stats_keys: table[string] of set[Key] &create_expire=1min
    &expire_func=function(s: table[string] of set[Key], idx: string): interval
        {
        Reporter::warning(fmt("SumStat key request for the %s SumStat uid took longer than 1 minute and was automatically cancelled.", idx));
        return 0secs;
        };

# This variable is maintained by manager nodes to track how many "dones" they
# collected per collection unique id. Once the number of results for a uid

@@ -251,11 +222,15 @@ global done_with: table[string] of count &create_expire=1min &default=0;
# Indexed on a uid.
global key_requests: table[string] of Result &create_expire=1min;

# Store uids for dynamic requests here to avoid cleanup on the uid.
# (This needs to be done differently!)
global dynamic_requests: set[string] &create_expire=1min;

# This variable is maintained by managers to prevent overwhelming communication due
# to too many intermediate updates. Each sumstat is tracked separately so that
# one won't overwhelm and degrade other quieter sumstats.
# Indexed on a sumstat id.
global outstanding_global_views: table[string] of count &create_expire=1min &default=0;
global outstanding_global_views: table[string] of count &read_expire=1min &default=0;

const zero_time = double_to_time(0.0);
# Managers handle logging.

@@ -274,6 +249,7 @@ event SumStats::finish_epoch(ss: SumStat)
    event SumStats::cluster_ss_request(uid, ss$name, T);

    done_with[uid] = 0;

    #print fmt("get_key by uid: %s", uid);
    event SumStats::get_a_key(uid, ss$name, T);
    }

@@ -295,6 +271,12 @@ function data_added(ss: SumStat, key: Key, result: Result)

function handle_end_of_result_collection(uid: string, ss_name: string, key: Key, cleanup: bool)
    {
    if ( uid !in key_requests )
        {
        Reporter::warning(fmt("Tried to handle end of result collection with missing uid in key_request sumstat:%s, key:%s.", ss_name, key));
        return;
        }

    #print fmt("worker_count:%d :: done_with:%d", Cluster::worker_count, done_with[uid]);
    local ss = stats_store[ss_name];
    local ir = key_requests[uid];

@@ -335,12 +317,6 @@ function request_all_current_keys(uid: string, ss_name: string, cleanup: bool)
        {
        done_with[uid] = 0;
        event SumStats::cluster_get_result(uid, ss_name, key, cleanup);
        when ( uid in done_with && Cluster::worker_count == done_with[uid] )
            {
            #print "done getting result";
            handle_end_of_result_collection(uid, ss_name, key, cleanup);
            request_all_current_keys(uid, ss_name, cleanup);
            }
        delete stats_keys[uid][key];
        break; # only a single key
        }

@@ -357,12 +333,16 @@ function request_all_current_keys(uid: string, ss_name: string, cleanup: bool)
event SumStats::send_no_key(uid: string, ss_name: string)
    {
    #print "send_no_key";

    if ( uid !in done_with )
        done_with[uid] = 0;

    ++done_with[uid];
    if ( Cluster::worker_count == done_with[uid] )
        {
        delete done_with[uid];

        if ( |stats_keys[uid]| > 0 )
        if ( uid in stats_keys && |stats_keys[uid]| > 0 )
            {
            #print "we need more keys!";
            # Now that we have a key from each worker, lets

@@ -375,6 +355,9 @@ event SumStats::send_no_key(uid: string, ss_name: string)
            local ss = stats_store[ss_name];
            if ( ss?$epoch_finished )
                ss$epoch_finished(network_time());

            delete stats_keys[uid];
            reset(ss);
            }
        }
    }

@@ -384,7 +367,7 @@ event SumStats::send_a_key(uid: string, ss_name: string, key: Key)
    #print fmt("send_a_key %s", key);
    if ( uid !in stats_keys )
        {
        # no clue what happened here
        Reporter::warning(fmt("Manager received a uid for an unknown request. SumStat: %s, Key: %s", ss_name, key));
        return;
        }

@@ -409,6 +392,8 @@ event SumStats::send_a_key(uid: string, ss_name: string, key: Key)
        local ss = stats_store[ss_name];
        if ( ss?$epoch_finished )
            ss$epoch_finished(network_time());

        reset(ss);
        }
    }
    }

@@ -426,20 +411,27 @@ event SumStats::cluster_send_result(uid: string, ss_name: string, key: Key, resu
    key_requests[uid] = compose_results(key_requests[uid], result);

    # Mark that a worker is done.
    if ( uid !in done_with )
        done_with[uid] = 0;

    #print fmt("MANAGER: got a result for %s %s from %s", uid, key, get_event_peer()$descr);
    ++done_with[uid];

    #if ( Cluster::worker_count == done_with[uid] )
    #    {
    #    print "done";
    #    handle_end_of_result_collection(uid, ss_name, key, cleanup);
    #    }
    if ( uid !in dynamic_requests &&
         uid in done_with && Cluster::worker_count == done_with[uid] )
        {
        handle_end_of_result_collection(uid, ss_name, key, cleanup);

        if ( cleanup )
            request_all_current_keys(uid, ss_name, cleanup);
        }
    }

# Managers handle intermediate updates here.
event SumStats::cluster_key_intermediate_response(ss_name: string, key: Key)
    {
    #print fmt("MANAGER: receiving intermediate key data from %s", get_event_peer()$descr);
    #print fmt("MANAGER: requesting key data for %s", key2str(key));
    #print fmt("MANAGER: requesting key data for %s", key);

    if ( ss_name in outstanding_global_views &&
         |outstanding_global_views[ss_name]| > max_outstanding_global_views )

@@ -454,110 +446,16 @@ event SumStats::cluster_key_intermediate_response(ss_name: string, key: Key)

    local uid = unique_id("");
    done_with[uid] = 0;
    #print fmt("requesting results for: %s", uid);
    event SumStats::cluster_get_result(uid, ss_name, key, F);
    when ( uid in done_with && Cluster::worker_count == done_with[uid] )
        {
        handle_end_of_result_collection(uid, ss_name, key, F);
        }
    timeout 1.1min
        {
        Reporter::warning(fmt("Dynamic SumStat intermediate key request for %s (%s) took longer than 1 minute and was automatically cancelled.", ss_name, key));
        }

    }

#event SumStats::cluster_ss_response(uid: string, ss_name: string, data: ResultTable, done: bool, cleanup: bool)
#    {
#    #print fmt("MANAGER: receiving results from %s", get_event_peer()$descr);
#
#    # Mark another worker as being "done" for this uid.
#    if ( done )
#        ++done_with[uid];
#
#    # We had better only be getting requests for stuff that exists.
#    if ( ss_name !in stats_store )
#        return;
#
#    if ( uid !in stats_keys )
#        stats_keys[uid] = table();
#
#    local local_data = stats_keys[uid];
#    local ss = stats_store[ss_name];
#
#    for ( key in data )
#        {
#        if ( key in local_data )
#            local_data[key] = compose_results(local_data[key], data[key]);
#        else
#            local_data[key] = data[key];
#
#        # If a stat is done being collected, thresholds for each key
#        # need to be checked so we're doing it here to avoid doubly
#        # iterating over each key.
#        if ( Cluster::worker_count == done_with[uid] )
#            {
#            if ( check_thresholds(ss, key, local_data[key], 1.0) )
#                {
#                threshold_crossed(ss, key, local_data[key]);
#                event SumStats::cluster_threshold_crossed(ss$name, key, threshold_tracker[ss$name][key]);
#                }
#            }
#        }
#
#    # If the data has been collected from all peers, we are done and ready to finish.
#    if ( cleanup && Cluster::worker_count == done_with[uid] )
#        {
#        local now = network_time();
#        if ( ss?$epoch_result )
#            {
#            for ( key in local_data )
#                ss$epoch_result(now, key, local_data[key]);
#            }
#
#        if ( ss?$epoch_finished )
#            ss$epoch_finished(now);
#
#        # Clean up
#        delete stats_keys[uid];
#        delete done_with[uid];
#        reset(ss);
#        }
#    }

#function request(ss_name: string): ResultTable
#    {
#    # This only needs to be implemented this way for cluster compatibility.
#    local uid = unique_id("dyn-");
#    stats_keys[uid] = table();
#    done_with[uid] = 0;
#    event SumStats::cluster_ss_request(uid, ss_name, F);
#
#    return when ( uid in done_with && Cluster::worker_count == done_with[uid] )
#        {
#        if ( uid in stats_keys )
#            {
#            local ss_result = stats_keys[uid];
#            # Clean up
#            delete stats_keys[uid];
#            delete done_with[uid];
#            reset(stats_store[ss_name]);
#            return ss_result;
#            }
#        else
#            return table();
#        }
#    timeout 1.1min
#        {
#        Reporter::warning(fmt("Dynamic SumStat request for %s took longer than 1 minute and was automatically cancelled.", ss_name));
#        return table();
#        }
#    }

function request_key(ss_name: string, key: Key): Result
    {
    local uid = unique_id("");
    done_with[uid] = 0;
    key_requests[uid] = table();
    add dynamic_requests[uid];

    event SumStats::cluster_get_result(uid, ss_name, key, F);
    return when ( uid in done_with && Cluster::worker_count == done_with[uid] )

@@ -567,13 +465,14 @@ function request_key(ss_name: string, key: Key): Result
        # Clean up
        delete key_requests[uid];
        delete done_with[uid];
        delete dynamic_requests[uid];

        return result;
        }
    timeout 1.1min
        {
        Reporter::warning(fmt("Dynamic SumStat key request for %s (%s) took longer than 1 minute and was automatically cancelled.", ss_name, key));
        return table();
        Reporter::warning(fmt("Dynamic SumStat key request for %s in SumStat %s took longer than 1 minute and was automatically cancelled.", key, ss_name));
        return Result();
        }
    }

@@ -153,11 +153,6 @@ export {
    ## Returns: The result for the requested sumstat key.
    global request_key: function(ss_name: string, key: Key): Result;

    ## This event is generated when thresholds are reset for a SumStat.
    ##
    ## name: SumStats name that thresholds were reset for.
    global thresholds_reset: event(name: string);

    ## Helper function to represent a :bro:type:`SumStats::Key` value as
    ## a simple string.
    ##

@@ -321,7 +316,6 @@ function reset(ss: SumStat)
        {
        delete threshold_tracker[ss$name];
        threshold_tracker[ss$name] = table();
        event SumStats::thresholds_reset(ss$name);
        }
    }

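A usage sketch (not part of this commit) for the dynamic key request path changed above; the "conn.attempts" SumStat name is an illustrative assumption:

# Hypothetical caller: asynchronously fetch the current cluster-wide result
# for a single key, using the when/timeout pattern request_key is built for.
event connection_established(c: connection)
    {
    when ( local r = SumStats::request_key("conn.attempts", SumStats::Key($host=c$id$orig_h)) )
        {
        print fmt("current result for %s:", c$id$orig_h);
        print r;
        }
    timeout 30secs
        {
        print "SumStats key request timed out";
        }
    }
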
@@ -2685,6 +2685,42 @@ type ModbusHeaders: record {
    function_code: count;
};

module Unified2;
export {
    type Unified2::IDSEvent: record {
        sensor_id: count;
        event_id: count;
        ts: time;
        signature_id: count;
        generator_id: count;
        signature_revision: count;
        classification_id: count;
        priority_id: count;
        src_ip: addr;
        dst_ip: addr;
        src_p: port;
        dst_p: port;
        impact_flag: count;
        impact: count;
        blocked: count;
        ## Not available in "legacy" IDS events.
        mpls_label: count &optional;
        ## Not available in "legacy" IDS events.
        vlan_id: count &optional;
        ## Only available in "legacy" IDS events.
        packet_action: count &optional;
    };

    type Unified2::Packet: record {
        sensor_id: count;
        event_id: count;
        event_second: count;
        packet_ts: time;
        link_type: count;
        data: string;
    };
}

module SOCKS;
export {
    ## This record is for a SOCKS client or server to provide either a

@@ -40,6 +40,7 @@

@load base/protocols/conn
@load base/protocols/dhcp
@load base/protocols/dnp3
@load base/protocols/dns
@load base/protocols/ftp
@load base/protocols/http

@@ -55,5 +56,7 @@

@load base/files/hash
@load base/files/extract
@load base/files/unified2

@load base/misc/find-checksum-offloading

scripts/base/protocols/dnp3/__load__.bro (new file, 3 lines)

@@ -0,0 +1,3 @@
@load ./main

@load-sigs ./dpd.sig

scripts/base/protocols/dnp3/consts.bro (new file, 49 lines)

@@ -0,0 +1,49 @@

module DNP3;

export {
    ## Standard defined DNP3 function codes.
    const function_codes = {
        # Requests.
        [0x00] = "CONFIRM",
        [0x01] = "READ",
        [0x02] = "WRITE",
        [0x03] = "SELECT",
        [0x04] = "OPERATE",
        [0x05] = "DIRECT_OPERATE",
        [0x06] = "DIRECT_OPERATE_NR",
        [0x07] = "IMMED_FREEZE",
        [0x08] = "IMMED_FREEZE_NR",
        [0x09] = "FREEZE_CLEAR",
        [0x0a] = "FREEZE_CLEAR_NR",
        [0x0b] = "FREEZE_AT_TIME",
        [0x0c] = "FREEZE_AT_TIME_NR",
        [0x0d] = "COLD_RESTART",
        [0x0e] = "WARM_RESTART",
        [0x0f] = "INITIALIZE_DATA",
        [0x10] = "INITIALIZE_APPL",
        [0x11] = "START_APPL",
        [0x12] = "STOP_APPL",
        [0x13] = "SAVE_CONFIG",
        [0x14] = "ENABLE_UNSOLICITED",
        [0x15] = "DISABLE_UNSOLICITED",
        [0x16] = "ASSIGN_CLASS",
        [0x17] = "DELAY_MEASURE",
        [0x18] = "RECORD_CURRENT_TIME",
        [0x19] = "OPEN_FILE",
        [0x1a] = "CLOSE_FILE",
        [0x1b] = "DELETE_FILE",
        [0x1c] = "GET_FILE_INFO",
        [0x1d] = "AUTHENTICATE_FILE",
        [0x1e] = "ABORT_FILE",
        [0x1f] = "ACTIVATE_CONFIG",
        [0x20] = "AUTHENTICATE_REQ",
        [0x21] = "AUTHENTICATE_ERR",

        # Responses.
        [0x81] = "RESPONSE",
        [0x82] = "UNSOLICITED_RESPONSE",
        [0x83] = "AUTHENTICATE_RESP",
    } &default=function(i: count):string { return fmt("unknown-%d", i); } &redef;
}

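A small illustration (not part of this commit) of how the &default fallback behaves:

# Hypothetical snippet: known codes map to names, unknown codes fall back to
# the table's &default function.
event bro_init()
    {
    print DNP3::function_codes[0x01];   # READ
    print DNP3::function_codes[0x99];   # unknown-153
    }
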
scripts/base/protocols/dnp3/dpd.sig (new file, 9 lines)

@@ -0,0 +1,9 @@

# DNP3 packets always start with 0x05 0x64.

signature dpd_dnp3_server {
    ip-proto == tcp
    payload /\x05\x64/
    tcp-state responder
    enable "dnp3"
}

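For comparison, a hypothetical companion signature (not part of this commit) that would match the same magic bytes on the originator side:

# Hypothetical client-side variant of the signature above.
signature dpd_dnp3_client {
    ip-proto == tcp
    payload /\x05\x64/
    tcp-state originator
    enable "dnp3"
}
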
scripts/base/protocols/dnp3/main.bro (new file, 73 lines)

@@ -0,0 +1,73 @@
##! A very basic DNP3 analysis script that just logs requests and replies.

module DNP3;

@load ./consts

export {
    redef enum Log::ID += { LOG };

    type Info: record {
        ## Time of the request.
        ts: time &log;
        ## Unique identifier for the connection.
        uid: string &log;
        ## Identifier for the connection.
        id: conn_id &log;
        ## The name of the function message in the request.
        fc_request: string &log &optional;
        ## The name of the function message in the reply.
        fc_reply: string &log &optional;
        ## The response's "internal indication number".
        iin: count &log &optional;
    };

    ## Event that can be handled to access the DNP3 record as it is sent on
    ## to the logging framework.
    global log_dnp3: event(rec: Info);
}

redef record connection += {
    dnp3: Info &optional;
};

const ports = { 20000/tcp };
redef likely_server_ports += { ports };

event bro_init() &priority=5
    {
    Log::create_stream(DNP3::LOG, [$columns=Info, $ev=log_dnp3]);
    Analyzer::register_for_ports(Analyzer::ANALYZER_DNP3, ports);
    }

event dnp3_application_request_header(c: connection, is_orig: bool, fc: count)
    {
    if ( ! c?$dnp3 )
        c$dnp3 = [$ts=network_time(), $uid=c$uid, $id=c$id];

    c$dnp3$ts = network_time();
    c$dnp3$fc_request = function_codes[fc];
    }

event dnp3_application_response_header(c: connection, is_orig: bool, fc: count, iin: count)
    {
    if ( ! c?$dnp3 )
        c$dnp3 = [$ts=network_time(), $uid=c$uid, $id=c$id];

    c$dnp3$ts = network_time();
    c$dnp3$fc_reply = function_codes[fc];
    c$dnp3$iin = iin;

    Log::write(LOG, c$dnp3);

    delete c$dnp3;
    }

event connection_state_remove(c: connection) &priority=-5
    {
    if ( ! c?$dnp3 )
        return;

    Log::write(LOG, c$dnp3);
    delete c$dnp3;
    }

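A local-policy sketch (not part of this commit) that consumes the new log stream; the COLD_RESTART check is purely illustrative:

# Hypothetical handler for DNP3::log_dnp3, flagging cold-restart requests.
event DNP3::log_dnp3(rec: DNP3::Info)
    {
    if ( rec?$fc_request && rec$fc_request == "COLD_RESTART" )
        print fmt("DNP3 cold restart requested on connection %s", rec$uid);
    }
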
@@ -27,8 +27,10 @@ export {

event mime_begin_entity(c: connection) &priority=10
    {
    c$smtp$entity = Entity();
    ++c$smtp_state$mime_depth;
    if ( c?$smtp )
        c$smtp$entity = Entity();
    if ( c?$smtp_state )
        ++c$smtp_state$mime_depth;
    }

event file_over_new_connection(f: fa_file, c: connection, is_orig: bool) &priority=5