Merge remote-tracking branch 'origin/master' into topic/seth/sumstats-updates

commit d6edbd27b1

96 changed files with 3085 additions and 839 deletions
@@ -204,7 +204,7 @@ export {
 ##
 ## tag: Tag for the protocol analyzer having a callback being registered.
 ##
-## reg: A :bro:see:`ProtoRegistration` record.
+## reg: A :bro:see:`Files::ProtoRegistration` record.
 ##
 ## Returns: true if the protocol being registered was not previously registered.
 global register_protocol: function(tag: Analyzer::Tag, reg: ProtoRegistration): bool;

@@ -228,11 +228,6 @@ redef record fa_file += {
 info: Info &optional;
 };

-redef record AnalyzerArgs += {
-# This is used interally for the core file analyzer api.
-tag: Files::Tag &optional;
-};
-
 # Store the callbacks for protocol analyzers that have files.
 global registered_protocols: table[Analyzer::Tag] of ProtoRegistration = table();

@@ -275,14 +270,12 @@ function set_timeout_interval(f: fa_file, t: interval): bool

 function add_analyzer(f: fa_file, tag: Files::Tag, args: AnalyzerArgs): bool
 {
-# This is to construct the correct args for the core API.
-args$tag = tag;
 add f$info$analyzers[Files::analyzer_name(tag)];

 if ( tag in analyzer_add_callbacks )
 analyzer_add_callbacks[tag](f, args);

-if ( ! __add_analyzer(f$id, args) )
+if ( ! __add_analyzer(f$id, tag, args) )
 {
 Reporter::warning(fmt("Analyzer %s not added successfully to file %s.", tag, f$id));
 return F;

@@ -297,8 +290,7 @@ function register_analyzer_add_callback(tag: Files::Tag, callback: function(f: f

 function remove_analyzer(f: fa_file, tag: Files::Tag, args: AnalyzerArgs): bool
 {
-args$tag = tag;
-return __remove_analyzer(f$id, args);
+return __remove_analyzer(f$id, tag, args);
 }

 function stop(f: fa_file): bool
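The hunks above drop the old AnalyzerArgs$tag workaround: the analyzer tag is now passed explicitly to __add_analyzer()/__remove_analyzer(), and register_protocol() is documented against Files::ProtoRegistration. A rough usage sketch follows; the $get_file_handle field, the FTP module, and the ANALYZER_FTP_DATA tag are illustrative assumptions, not something introduced by this diff:

    module FTP;

    # Tell the files framework how to build a file handle for this protocol.
    function get_file_handle(c: connection, is_orig: bool): string
        {
        # Any string that stays stable for the lifetime of one transferred file will do.
        return cat(Analyzer::ANALYZER_FTP_DATA, c$start_time, c$id, is_orig);
        }

    event bro_init() &priority=5
        {
        Files::register_protocol(Analyzer::ANALYZER_FTP_DATA,
                                 [$get_file_handle=FTP::get_file_handle]);
        }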
@@ -109,7 +109,7 @@ export {

 ## Enables the old filtering approach of "only watch common ports for
 ## analyzed protocols".
 ##
 ##
 ## Unless you know what you are doing, leave this set to F.
 const enable_auto_protocol_capture_filters = F &redef;
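Re-enabling the old port-based filtering described above is a one-line redef (illustrative, not part of this commit):

    redef PacketFilter::enable_auto_protocol_capture_filters = T;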
@@ -5,5 +5,6 @@
 @load ./sample
 @load ./std-dev
 @load ./sum
+@load ./topk
 @load ./unique
 @load ./variance
scripts/base/frameworks/sumstats/plugins/topk.bro (new file, 50 lines)

@@ -0,0 +1,50 @@
@load base/frameworks/sumstats

module SumStats;

export {
    redef record Reducer += {
        ## number of elements to keep in the top-k list
        topk_size: count &default=500;
    };

    redef enum Calculation += {
        TOPK
    };

    redef record ResultVal += {
        topk: opaque of topk &optional;
    };
}

hook init_resultval_hook(r: Reducer, rv: ResultVal)
    {
    if ( TOPK in r$apply && ! rv?$topk )
        rv$topk = topk_init(r$topk_size);
    }

hook observe_hook(r: Reducer, val: double, obs: Observation, rv: ResultVal)
    {
    if ( TOPK in r$apply )
        topk_add(rv$topk, obs);
    }

hook compose_resultvals_hook(result: ResultVal, rv1: ResultVal, rv2: ResultVal)
    {
    if ( rv1?$topk )
        {
        result$topk = topk_init(topk_size(rv1$topk));

        topk_merge(result$topk, rv1$topk);

        if ( rv2?$topk )
            topk_merge(result$topk, rv2$topk);
        }

    else if ( rv2?$topk )
        {
        result$topk = topk_init(topk_size(rv2$topk));
        topk_merge(result$topk, rv2$topk);
        }
    }
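A minimal sketch of how the new TOPK calculation might be exercised end to end. The stream name and observations are invented, topk_get_top() is assumed to be available alongside the topk_init()/topk_add() BiFs used above, and the SumStats::create()/epoch_result shape follows the Bro 2.2 SumStats API that this branch is converging on, so field names may differ slightly:

    @load base/frameworks/sumstats

    event bro_init()
        {
        local r1: SumStats::Reducer = [$stream="test.topk",
                                       $apply=set(SumStats::TOPK),
                                       $topk_size=100];

        SumStats::create([$name="topk-example",
                          $epoch=1min,
                          $reducers=set(r1),
                          $epoch_result(ts: time, key: SumStats::Key, result: SumStats::Result) =
                              {
                              local rv = result["test.topk"];
                              print fmt("top observations for %s: %s",
                                        key$host, topk_get_top(rv$topk, 5));
                              }]);
        }

    event connection_established(c: connection)
        {
        # Track responder ports per originator; the top-k list keeps the most frequent ones.
        SumStats::observe("test.topk", [$host=c$id$orig_h], [$str=cat(c$id$resp_p)]);
        }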
@@ -531,22 +531,19 @@ type record_field_table: table[string] of record_field;
 # dependent on the names remaining as they are now.

 ## Set of BPF capture filters to use for capturing, indexed by a user-definable
-## ID (which must be unique). If Bro is *not* configured to examine
-## :bro:id:`PacketFilter::all_packets`, all packets matching at least
-## one of the filters in this table (and all in :bro:id:`restrict_filters`)
-## will be analyzed.
+## ID (which must be unique). If Bro is *not* configured with
+## :bro:id:`PacketFilter::enable_auto_protocol_capture_filters`,
+## all packets matching at least one of the filters in this table (and all in
+## :bro:id:`restrict_filters`) will be analyzed.
 ##
-## .. bro:see:: PacketFilter PacketFilter::all_packets
+## .. bro:see:: PacketFilter PacketFilter::enable_auto_protocol_capture_filters
 ## PacketFilter::unrestricted_filter restrict_filters
 global capture_filters: table[string] of string &redef;

 ## Set of BPF filters to restrict capturing, indexed by a user-definable ID (which
-## must be unique). If Bro is *not* configured to examine
-## :bro:id:`PacketFilter::all_packets`, only packets matching *all* of the
-## filters in this table (and any in :bro:id:`capture_filters`) will be
-## analyzed.
+## must be unique).
 ##
-## .. bro:see:: PacketFilter PacketFilter::all_packets
+## .. bro:see:: PacketFilter PacketFilter::enable_auto_protocol_capture_filters
 ## PacketFilter::unrestricted_filter capture_filters
 global restrict_filters: table[string] of string &redef;
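The interaction of the two tables is easiest to see with a concrete redef; the filter IDs and BPF expressions are illustrative only:

    # Capture DNS and HTTP traffic...
    redef capture_filters += {
        ["dns"] = "port 53",
        ["http"] = "tcp port 80"
    };

    # ...but only from one monitored subnet.
    redef restrict_filters += {
        ["local-net"] = "net 192.168.1.0/24"
    };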
@@ -3042,6 +3039,11 @@ module GLOBAL;
 ## Number of bytes per packet to capture from live interfaces.
 const snaplen = 8192 &redef;

+## Seed for hashes computed internally for probabilistic data structures. Using
+## the same value here will make the hashes compatible between independent Bro
+## instances. If left unset, Bro will use a temporary local seed.
+const global_hash_seed: string = "" &redef;
+
 # Load BiFs defined by plugins.
 @load base/bif/plugins
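In a cluster, or anywhere results containing probabilistic opaques (such as the top-k values above) are merged across Bro instances, the new seed would typically be pinned to one shared value. A minimal, illustrative redef:

    # Use the same string on every node so internal hashes stay compatible.
    redef global_hash_seed = "site-wide-shared-seed";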
@@ -1,4 +1,5 @@
 @load ./utils-commands
+@load ./info
 @load ./main
 @load ./utils
 @load ./files
@@ -1,3 +1,4 @@
+@load ./info
 @load ./main
 @load ./utils
 @load base/utils/conn-ids
@@ -19,6 +19,7 @@
 ##! sizes are not logged, but at the benefit of saving CPU cycles that
 ##! otherwise go to analyzing the large (and likely benign) connections.

+@load ./info
 @load ./main
 @load base/protocols/conn
 @load base/protocols/ssl
scripts/base/protocols/ftp/info.bro (new file, 72 lines)

@@ -0,0 +1,72 @@
##! Defines data structures for tracking and logging FTP sessions.

module FTP;

@load ./utils-commands

export {

    ## This setting changes if passwords used in FTP sessions are
    ## captured or not.
    const default_capture_password = F &redef;

    ## The expected endpoints of an FTP data channel.
    type ExpectedDataChannel: record {
        ## Whether PASV mode is toggled for control channel.
        passive: bool &log;
        ## The host that will be initiating the data connection.
        orig_h: addr &log;
        ## The host that will be accepting the data connection.
        resp_h: addr &log;
        ## The port at which the acceptor is listening for the data connection.
        resp_p: port &log;
    };

    type Info: record {
        ## Time when the command was sent.
        ts: time &log;
        ## Unique ID for the connection.
        uid: string &log;
        ## The connection's 4-tuple of endpoint addresses/ports.
        id: conn_id &log;
        ## User name for the current FTP session.
        user: string &log &default="<unknown>";
        ## Password for the current FTP session if captured.
        password: string &log &optional;
        ## Command given by the client.
        command: string &log &optional;
        ## Argument for the command if one is given.
        arg: string &log &optional;

        ## Libmagic "sniffed" file type if the command indicates a file transfer.
        mime_type: string &log &optional;
        ## Size of the file if the command indicates a file transfer.
        file_size: count &log &optional;

        ## Reply code from the server in response to the command.
        reply_code: count &log &optional;
        ## Reply message from the server in response to the command.
        reply_msg: string &log &optional;

        ## Expected FTP data channel.
        data_channel: ExpectedDataChannel &log &optional;

        ## Current working directory that this session is in. By making
        ## the default value '.', we can indicate that unless something
        ## more concrete is discovered that the existing but unknown
        ## directory is ok to use.
        cwd: string &default=".";

        ## Command that is currently waiting for a response.
        cmdarg: CmdArg &optional;
        ## Queue for commands that have been sent but not yet responded to
        ## are tracked here.
        pending_commands: PendingCmds;

        ## Indicates if the session is in active or passive mode.
        passive: bool &default=F;

        ## Determines if the password will be captured for this request.
        capture_password: bool &default=default_capture_password;
    };
}
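With the record definitions factored out into this file, other scripts only need these types plus the log_ftp event declared in main.bro below. A small illustrative handler (whether passwords should be captured is site policy; the option defaults to F as shown above):

    @load base/protocols/ftp

    # Opt in to recording passwords in ftp.log.
    redef FTP::default_capture_password = T;

    event FTP::log_ftp(rec: FTP::Info)
        {
        if ( rec?$command && rec$command == "RETR" && rec?$arg )
            print fmt("%s retrieved %s via FTP", rec$id$orig_h, rec$arg);
        }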
@@ -3,6 +3,8 @@
 ##! will take on the full path that the client is at along with the requested
 ##! file name.

+@load ./info
+@load ./utils
 @load ./utils-commands
 @load base/utils/paths
 @load base/utils/numbers

@@ -20,72 +22,9 @@ export {
 "EPSV"
 } &redef;

-## This setting changes if passwords used in FTP sessions are captured or not.
-const default_capture_password = F &redef;
-
 ## User IDs that can be considered "anonymous".
 const guest_ids = { "anonymous", "ftp", "ftpuser", "guest" } &redef;

-## The expected endpoints of an FTP data channel.
-type ExpectedDataChannel: record {
-## Whether PASV mode is toggled for control channel.
-passive: bool &log;
-## The host that will be initiating the data connection.
-orig_h: addr &log;
-## The host that will be accepting the data connection.
-resp_h: addr &log;
-## The port at which the acceptor is listening for the data connection.
-resp_p: port &log;
-};
-
-type Info: record {
-## Time when the command was sent.
-ts: time &log;
-## Unique ID for the connection.
-uid: string &log;
-## The connection's 4-tuple of endpoint addresses/ports.
-id: conn_id &log;
-## User name for the current FTP session.
-user: string &log &default="<unknown>";
-## Password for the current FTP session if captured.
-password: string &log &optional;
-## Command given by the client.
-command: string &log &optional;
-## Argument for the command if one is given.
-arg: string &log &optional;
-
-## Libmagic "sniffed" file type if the command indicates a file transfer.
-mime_type: string &log &optional;
-## Size of the file if the command indicates a file transfer.
-file_size: count &log &optional;
-
-## Reply code from the server in response to the command.
-reply_code: count &log &optional;
-## Reply message from the server in response to the command.
-reply_msg: string &log &optional;
-
-## Expected FTP data channel.
-data_channel: ExpectedDataChannel &log &optional;
-
-## Current working directory that this session is in. By making
-## the default value '.', we can indicate that unless something
-## more concrete is discovered that the existing but unknown
-## directory is ok to use.
-cwd: string &default=".";
-
-## Command that is currently waiting for a response.
-cmdarg: CmdArg &optional;
-## Queue for commands that have been sent but not yet responded to
-## are tracked here.
-pending_commands: PendingCmds;
-
-## Indicates if the session is in active or passive mode.
-passive: bool &default=F;
-
-## Determines if the password will be captured for this request.
-capture_password: bool &default=default_capture_password;
-};
-
 ## This record is to hold a parsed FTP reply code. For example, for the
 ## 201 status code, the digits would be parsed as: x->2, y->0, z=>1.
 type ReplyCode: record {

@@ -102,8 +41,6 @@ export {
 global log_ftp: event(rec: Info);
 }

-@load ./utils
-
 # Add the state tracking information variable to the connection record
 redef record connection += {
 ftp: Info &optional;
@@ -1,7 +1,8 @@
 ##! Utilities specific for FTP processing.

 @load ./main
+@load ./info
 @load base/utils/addrs
 @load base/utils/paths

 module FTP;
@@ -67,11 +67,8 @@ export {
 ## (especially with large file transfers).
 const disable_analyzer_after_detection = T &redef;

-## The maximum amount of time a script can delay records from being logged.
-const max_log_delay = 15secs &redef;
-
 ## Delays an SSL record for a specific token: the record will not be logged
-## as longs the token exists or until :bro:id:`SSL::max_log_delay` elapses.
+## as longs the token exists or until 15 seconds elapses.
 global delay_log: function(info: Info, token: string);

 ## Undelays an SSL record for a previously inserted token, allowing the

@@ -90,7 +87,7 @@ redef record connection += {
 redef record Info += {
 # Adding a string "token" to this set will cause the SSL script
 # to delay logging the record until either the token has been removed or
-# the record has been delayed for :bro:id:`SSL::max_log_delay`.
+# the record has been delayed.
 delay_tokens: set[string] &optional;
 };

@@ -138,7 +135,7 @@ function log_record(info: Info)
 {
 log_record(info);
 }
-timeout SSL::max_log_delay
+timeout 15secs
 {
 Reporter::info(fmt("SSL delay tokens not released in time (%s tokens remaining)",
 |info$delay_tokens|));
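The delay mechanism these hunks adjust (the max_log_delay option is gone; the ceiling is now a fixed 15 seconds) works by adding a token before the record would be written and releasing it later. A sketch of typical use; SSL::undelay_log() is assumed from the truncated doc comment above, and the reverse lookup is just a stand-in for any asynchronous work:

    event ssl_established(c: connection)
        {
        # Hold the ssl.log record until the lookup completes (or the 15 s ceiling hits).
        SSL::delay_log(c$ssl, "resp-hostname");

        when ( local name = lookup_addr(c$id$resp_h) )
            {
            print fmt("%s resolves to %s", c$id$resp_h, name);
            SSL::undelay_log(c$ssl, "resp-hostname");
            }
        }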
@@ -34,8 +34,8 @@ export {
 global current_shunted_host_pairs: function(): set[conn_id];

 redef enum Notice::Type += {
-## Indicative that :bro:id:`max_bpf_shunts` connections are already
-## being shunted with BPF filters and no more are allowed.
+## Indicative that :bro:id:`PacketFilter::max_bpf_shunts` connections
+## are already being shunted with BPF filters and no more are allowed.
 No_More_Conn_Shunts_Available,

 ## Limitations in BPF make shunting some connections with BPF impossible.
@@ -12,12 +12,12 @@ export {
 ## Apply BPF filters to each worker in a way that causes them to
 ## automatically flow balance traffic between them.
 AUTO_BPF,
-## Load balance traffic across the workers by making each one apply
-## a restrict filter to only listen to a single MAC address. This
-## is a somewhat common deployment option for sites doing network
-## based load balancing with MAC address rewriting and passing the
-## traffic to a single interface. Multiple MAC addresses will show
-## up on the same interface and need filtered to a single address.
+# Load balance traffic across the workers by making each one apply
+# a restrict filter to only listen to a single MAC address. This
+# is a somewhat common deployment option for sites doing network
+# based load balancing with MAC address rewriting and passing the
+# traffic to a single interface. Multiple MAC addresses will show
+# up on the same interface and need filtered to a single address.
 #MAC_ADDR_BPF,
 };
@@ -1,10 +1,10 @@
-## Capture TCP fragments, but not UDP (or ICMP), since those are a lot more
-## common due to high-volume, fragmenting protocols such as NFS :-(.
+# Capture TCP fragments, but not UDP (or ICMP), since those are a lot more
+# common due to high-volume, fragmenting protocols such as NFS :-(.

-## This normally isn't used because of the default open packet filter
-## but we set it anyway in case the user is using a packet filter.
-## Note: This was removed because the default model now is to have a wide
-## open packet filter.
+# This normally isn't used because of the default open packet filter
+# but we set it anyway in case the user is using a packet filter.
+# Note: This was removed because the default model now is to have a wide
+# open packet filter.
 #redef capture_filters += { ["frag"] = "(ip[6:2] & 0x3fff != 0) and tcp" };

 ## Shorten the fragment timeout from never expiring to expiring fragments after