Merge remote-tracking branch 'origin/master' into topic/bernhard/sqlite

Conflicts:
	src/threading/AsciiFormatter.cc
Bernhard Amann 2013-03-11 11:47:10 -07:00
commit 8cb91de93a
203 changed files with 3278 additions and 1284 deletions


@ -39,7 +39,7 @@ export {
## The node type doing all the actual traffic analysis.
WORKER,
## A node acting as a traffic recorder using the
## `Time Machine <http://tracker.bro-ids.org/time-machine>`_ software.
## `Time Machine <http://tracker.bro.org/time-machine>`_ software.
TIME_MACHINE,
};


@ -12,17 +12,17 @@ export {
const default_mode = MANUAL &redef;
## Separator between fields.
## Please note that the separator has to be exactly one character long
## Please note that the separator has to be exactly one character long.
## Can be overwritten by individual writers.
const separator = "\t" &redef;
## Separator between set elements.
## Please note that the separator has to be exactly one character long
## Please note that the separator has to be exactly one character long.
## Can be overwritten by individual writers.
const set_separator = "," &redef;
## String to use for empty fields.
## Can be overwritten by individual writers.
## Can be overwritten by individual writers.
const empty_field = "(empty)" &redef;
## String to use for an unset &optional field.
@ -133,7 +133,7 @@ export {
global add_event: function(description: Input::EventDescription) : bool;
## Remove an input stream. Returns true on success and false if the named stream was
## not found.
## not found.
##
## id: string value identifying the stream to be removed
global remove: function(id: string) : bool;
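A minimal usage sketch, assuming an event stream named "blacklist" was registered elsewhere via Input::add_event (the stream name is illustrative and not part of this change): once the reader signals end of data, the stream can be torn down by the name it was registered under.

event Input::end_of_data(name: string, source: string)
	{
	# "blacklist" is a hypothetical stream name; Input::remove() returns F
	# if no stream with that name exists.
	if ( name == "blacklist" )
		Input::remove(name);
	}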


@ -17,7 +17,9 @@
@if ( Cluster::is_enabled() )
@load ./cluster
@else
@load ./non-cluster
@endif
# Load here so that it can check whether clustering is enabled.
@load ./actions/pp-alarms
@load ./actions/pp-alarms


@ -27,18 +27,17 @@ export {
## Notice types which should have the "remote" location looked up.
## If GeoIP support is not built in, this does nothing.
const lookup_location_types: set[Notice::Type] = {} &redef;
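A hedged example of using the tunable above (the notice type is illustrative): a site opts specific notice types into the GeoIP lookup from its local configuration.

redef Notice::lookup_location_types += { SSH::Password_Guessing };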
## Add a helper to the notice policy for looking up GeoIP data.
redef Notice::policy += {
[$pred(n: Notice::Info) = { return (n$note in Notice::lookup_location_types); },
$action = ACTION_ADD_GEODATA,
$priority = 10],
};
}
hook policy(n: Notice::Info) &priority=10
{
if ( n$note in Notice::lookup_location_types )
add n$actions[ACTION_ADD_GEODATA];
}
# This is handled at a high priority in case other notice handlers
# want to use the data.
event notice(n: Notice::Info) &priority=10
hook notice(n: Notice::Info) &priority=10
{
if ( ACTION_ADD_GEODATA in n$actions &&
|Site::local_nets| > 0 &&


@ -17,20 +17,13 @@ export {
};
}
# This is a little awkward because we want to inject drop along with the
# synchronous functions.
event bro_init()
hook notice(n: Notice::Info)
{
local drop_func = function(n: Notice::Info)
if ( ACTION_DROP in n$actions )
{
if ( ACTION_DROP in n$actions )
{
#local drop = React::drop_address(n$src, "");
#local addl = drop?$sub ? fmt(" %s", drop$sub) : "";
#n$dropped = drop$note != Drop::AddressDropIgnored;
#n$msg += fmt(" [%s%s]", drop$note, addl);
}
};
add Notice::sync_functions[drop_func];
#local drop = React::drop_address(n$src, "");
#local addl = drop?$sub ? fmt(" %s", drop$sub) : "";
#n$dropped = drop$note != Drop::AddressDropIgnored;
#n$msg += fmt(" [%s%s]", drop$note, addl);
}
}


@ -18,7 +18,7 @@ export {
};
}
event notice(n: Notice::Info) &priority=-5
hook notice(n: Notice::Info) &priority=-5
{
if ( |Site::local_admins| > 0 &&
ACTION_EMAIL_ADMIN in n$actions )


@ -15,7 +15,7 @@ export {
const mail_page_dest = "" &redef;
}
event notice(n: Notice::Info) &priority=-5
hook notice(n: Notice::Info) &priority=-5
{
if ( ACTION_PAGE in n$actions )
email_notice_to(n, mail_page_dest, F);


@ -105,7 +105,7 @@ event bro_init()
$postprocessor=pp_postprocessor]);
}
event notice(n: Notice::Info) &priority=-5
hook notice(n: Notice::Info) &priority=-5
{
if ( ! want_pp() )
return;


@ -21,30 +21,10 @@ redef Cluster::manager2worker_events += /Notice::begin_suppression/;
redef Cluster::worker2manager_events += /Notice::cluster_notice/;
@if ( Cluster::local_node_type() != Cluster::MANAGER )
# The notice policy is completely handled by the manager and shouldn't be
# done by workers or proxies to save time for packet processing.
redef Notice::policy = table();
event Notice::begin_suppression(n: Notice::Info)
{
suppressing[n$note, n$identifier] = n;
}
event Notice::notice(n: Notice::Info)
{
# Send the locally generated notice on to the manager.
event Notice::cluster_notice(n);
}
event bro_init() &priority=-3
{
# Workers and proxies need to disable the notice streams because notice
# events are forwarded directly instead of being logged remotely.
Log::disable_stream(Notice::LOG);
Log::disable_stream(Notice::POLICY_LOG);
Log::disable_stream(Notice::ALARM_LOG);
}
@endif
@if ( Cluster::local_node_type() == Cluster::MANAGER )
@ -54,3 +34,19 @@ event Notice::cluster_notice(n: Notice::Info)
NOTICE(n);
}
@endif
module GLOBAL;
## This is the entry point in the global namespace for the notice framework.
function NOTICE(n: Notice::Info)
{
# Suppress this notice if necessary.
if ( Notice::is_being_suppressed(n) )
return;
if ( Cluster::local_node_type() == Cluster::MANAGER )
Notice::internal_NOTICE(n);
else
# For non-managers, send the notice on to the manager.
event Notice::cluster_notice(n);
}


@ -13,7 +13,7 @@ module Notice;
# reference to the original notice)
global tmp_notice_storage: table[string] of Notice::Info &create_expire=max_email_delay+10secs;
event Notice::notice(n: Notice::Info) &priority=10
hook notice(n: Notice::Info) &priority=10
{
if ( ! n?$src && ! n?$dst )
return;
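The hunk above is from the script that extends notice emails with hostname information. For context, a sketch of the delay-token pattern it builds on, closely following the surrounding file (the token name is illustrative): a token is added to n$email_delay_tokens before an asynchronous lookup and deleted once the result arrives, so the email is held back until then or until the maximum delay expires.

hook notice(n: Notice::Info) &priority=10
	{
	if ( ! n?$src || ACTION_EMAIL !in n$actions )
		return;

	local uid = unique_id("");
	tmp_notice_storage[uid] = n;

	add n$email_delay_tokens["hostnames-src"];
	when ( local src_name = lookup_addr(n$src) )
		{
		# ... attach src_name to the stored notice here ...
		delete tmp_notice_storage[uid]$email_delay_tokens["hostnames-src"];
		}
	}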


@ -10,9 +10,6 @@ export {
redef enum Log::ID += {
## This is the primary logging stream for notices.
LOG,
## This is the notice policy auditing log. It records what the current
## notice policy is at Bro init time.
POLICY_LOG,
## This is the alarm stream.
ALARM_LOG,
};
@ -42,9 +39,6 @@ export {
## version of the alarm log is emailed in bulk to the address(es)
## configured in :bro:id:`Notice::mail_dest`.
ACTION_ALARM,
## Indicates that the notice should not be supressed by the normal
## duplicate notice suppression that the notice framework does.
ACTION_NO_SUPPRESS,
};
## The notice framework is able to do automatic notice suppression by
@ -64,7 +58,7 @@ export {
## A connection 4-tuple identifying the endpoints concerned with the
## notice.
id: conn_id &log &optional;
## A shorthand way of giving the uid and id to a notice. The
## reference to the actual connection will be deleted after applying
## the notice policy.
@ -102,10 +96,6 @@ export {
## The actions which have been applied to this notice.
actions: set[Notice::Action] &log &optional;
## These are policy items that returned T and applied their action
## to the notice.
policy_items: set[count] &log &optional;
## By adding chunks of text into this element, other scripts can
## expand on notices that are being emailed. The normal way to add text
## is to extend the vector by handling the :bro:id:`Notice::notice`
@ -142,9 +132,8 @@ export {
identifier: string &optional;
## This field indicates the length of time that this
## unique notice should be suppressed. This field is automatically
## filled out and should not be written to by any other script.
suppress_for: interval &log &optional;
## unique notice should be suppressed.
suppress_for: interval &log &default=default_suppression_interval;
};
## Ignored notice types.
@ -159,58 +148,8 @@ export {
## intervals for entire notice types.
const type_suppression_intervals: table[Notice::Type] of interval = {} &redef;
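A hedged example (the notice type and interval are illustrative): a site can widen the suppression window for a particular notice type, and the default Notice::policy handler further down in this diff applies the interval automatically.

redef Notice::type_suppression_intervals += {
	[SSH::Password_Guessing] = 1day,
};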
## This is the record that defines the items that make up the notice policy.
type PolicyItem: record {
## This is the exact positional order in which the
## :bro:type:`Notice::PolicyItem` records are checked.
## This is set internally by the notice framework.
position: count &log &optional;
## Define the priority for this check. Items are checked in ordered
## from highest value (10) to lowest value (0).
priority: count &log &default=5;
## An action given to the notice if the predicate return true.
action: Notice::Action &log &default=ACTION_NONE;
## The pred (predicate) field is a function that returns a boolean T
## or F value. If the predicate function return true, the action in
## this record is applied to the notice that is given as an argument
## to the predicate function. If no predicate is supplied, it's
## assumed that the PolicyItem always applies.
pred: function(n: Notice::Info): bool &log &optional;
## Indicates this item should terminate policy processing if the
## predicate returns T.
halt: bool &log &default=F;
## This defines the length of time that this particular notice should
## be supressed.
suppress_for: interval &log &optional;
};
## Defines a notice policy that is extensible on a per-site basis.
## All notice processing is done through this variable.
const policy: set[PolicyItem] = {
[$pred(n: Notice::Info) = { return (n$note in Notice::ignored_types); },
$halt=T, $priority = 9],
[$pred(n: Notice::Info) = { return (n$note in Notice::not_suppressed_types); },
$action = ACTION_NO_SUPPRESS,
$priority = 9],
[$pred(n: Notice::Info) = { return (n$note in Notice::alarmed_types); },
$action = ACTION_ALARM,
$priority = 8],
[$pred(n: Notice::Info) = { return (n$note in Notice::emailed_types); },
$action = ACTION_EMAIL,
$priority = 8],
[$pred(n: Notice::Info) = {
if (n$note in Notice::type_suppression_intervals)
{
n$suppress_for=Notice::type_suppression_intervals[n$note];
return T;
}
return F;
},
$action = ACTION_NONE,
$priority = 8],
[$action = ACTION_LOG,
$priority = 0],
} &redef;
## The hook to modify notice handling.
global policy: hook(n: Notice::Info);
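This hook replaces the old table-based Notice::policy. A minimal site-side sketch, assuming an illustrative notice type: handlers adjust the notice by adding to n$actions, and a handler that breaks at high priority drops the notice outright, which is how the default handler later in this diff treats ignored types.

hook Notice::policy(n: Notice::Info) &priority=5
	{
	if ( n$note == SSH::Password_Guessing )
		add n$actions[Notice::ACTION_EMAIL];
	}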
## Local system sendmail program.
const sendmail = "/usr/sbin/sendmail" &redef;
@ -240,25 +179,11 @@ export {
## This is the event that is called as the entry point to the
## notice framework by the global :bro:id:`NOTICE` function. By the time
## this event is generated, default values have already been filled out in
## the :bro:type:`Notice::Info` record and synchronous functions in the
## :bro:id:`Notice::sync_functions` have already been called. The notice
## the :bro:type:`Notice::Info` record and the notice
## policy has also been applied.
##
## n: The record containing notice data.
global notice: event(n: Info);
## This is a set of functions that provide a synchronous way for scripts
## extending the notice framework to run before the normal event based
## notice pathway that most of the notice framework takes. This is helpful
## in cases where an action against a notice needs to happen immediately
## and can't wait the short time for the event to bubble up to the top of
## the event queue. An example is the IP address dropping script that
## can block IP addresses that have notices generated because it
## needs to operate closer to real time than the event queue allows it to.
## Normally the event based extension model using the
## :bro:id:`Notice::notice` event will work fine if there aren't harder
## real time constraints.
const sync_functions: set[function(n: Notice::Info)] = set() &redef;
global notice: hook(n: Info);
## This event is generated when a notice begins to be suppressed.
##
@ -266,6 +191,11 @@ export {
## about to be suppressed.
global begin_suppression: event(n: Notice::Info);
## A function to determine if an event is supposed to be suppressed.
##
## n: The record containing the notice in question.
global is_being_suppressed: function(n: Notice::Info): bool;
## This event is generated on each occurrence of an event being suppressed.
##
## n: The record containing notice data regarding the notice type
@ -299,13 +229,13 @@ export {
##
## Returns: a string of mail headers to which an email body can be appended
global email_headers: function(subject_desc: string, dest: string): string;
## This event can be handled to access the :bro:type:`Notice::Info`
## record as it is sent on to the logging framework.
##
## rec: The record containing notice data before it is logged.
global log_notice: event(rec: Info);
## This is an internal wrapper for the global :bro:id:`NOTICE` function;
## disregard.
##
@ -338,10 +268,6 @@ global suppressing: table[Type, string] of Notice::Info = {}
&create_expire=0secs
&expire_func=per_notice_suppression_interval;
# This is an internal variable used to store the notice policy ordered by
# priority.
global ordered_policy: vector of PolicyItem = vector();
function log_mailing_postprocessor(info: Log::RotationInfo): bool
{
if ( ! reading_traces() && mail_dest != "" )
@ -424,9 +350,7 @@ function email_notice_to(n: Notice::Info, dest: string, extend: bool)
}
else
{
event reporter_info(network_time(),
fmt("Notice email delay tokens weren't released in time (%s).", n$email_delay_tokens),
"");
Reporter::info(fmt("Notice email delay tokens weren't released in time (%s).", n$email_delay_tokens));
}
}
}
@ -468,7 +392,26 @@ function email_notice_to(n: Notice::Info, dest: string, extend: bool)
piped_exec(fmt("%s -t -oi", sendmail), email_text);
}
event notice(n: Notice::Info) &priority=-5
hook Notice::policy(n: Notice::Info) &priority=10
{
if ( n$note in Notice::ignored_types )
break;
if ( n$note in Notice::not_suppressed_types )
n$suppress_for=0secs;
if ( n$note in Notice::alarmed_types )
add n$actions[ACTION_ALARM];
if ( n$note in Notice::emailed_types )
add n$actions[ACTION_EMAIL];
if ( n$note in Notice::type_suppression_intervals )
n$suppress_for=Notice::type_suppression_intervals[n$note];
# Logging is a default action. It can be removed in a later hook if desired.
add n$actions[ACTION_LOG];
}
hook Notice::notice(n: Notice::Info) &priority=-5
{
if ( ACTION_EMAIL in n$actions )
email_notice_to(n, mail_dest, T);
@ -480,7 +423,6 @@ event notice(n: Notice::Info) &priority=-5
# Normally suppress further notices like this one unless directed not to.
# n$identifier *must* be specified for suppression to function at all.
if ( n?$identifier &&
ACTION_NO_SUPPRESS !in n$actions &&
[n$note, n$identifier] !in suppressing &&
n$suppress_for != 0secs )
{
@ -488,8 +430,8 @@ event notice(n: Notice::Info) &priority=-5
event Notice::begin_suppression(n);
}
}
## This determines if a notice is being suppressed. It is only used
## This determines if a notice is being suppressed. It is only used
## internally as part of the mechanics for the global :bro:id:`NOTICE`
## function.
function is_being_suppressed(n: Notice::Info): bool
@ -539,7 +481,7 @@ function apply_policy(n: Notice::Info)
n$p = n$id$resp_p;
}
if ( n?$p )
if ( n?$p )
n$proto = get_port_transport_proto(n$p);
if ( n?$iconn )
@ -565,27 +507,8 @@ function apply_policy(n: Notice::Info)
if ( ! n?$email_delay_tokens )
n$email_delay_tokens = set();
if ( ! n?$policy_items )
n$policy_items = set();
for ( i in ordered_policy )
{
# If there's no predicate or the predicate returns F.
if ( ! ordered_policy[i]?$pred || ordered_policy[i]$pred(n) )
{
add n$actions[ordered_policy[i]$action];
add n$policy_items[int_to_count(i)];
# If the predicate matched and there was a suppression interval,
# apply it to the notice now.
if ( ordered_policy[i]?$suppress_for )
n$suppress_for = ordered_policy[i]$suppress_for;
# If the policy item wants to halt policy processing, do it now!
if ( ordered_policy[i]$halt )
break;
}
}
# Apply the hook based policy.
hook Notice::policy(n);
# Apply the suppression time after applying the policy so that policy
# items can give custom suppression intervals. If there is no
@ -602,61 +525,15 @@ function apply_policy(n: Notice::Info)
delete n$iconn;
}
# Create the ordered notice policy automatically which will be used at runtime
# for prioritized matching of the notice policy.
event bro_init() &priority=10
{
# Create the policy log here because it's only written to in this handler.
Log::create_stream(Notice::POLICY_LOG, [$columns=PolicyItem]);
local tmp: table[count] of set[PolicyItem] = table();
for ( pi in policy )
{
if ( pi$priority < 0 || pi$priority > 10 )
Reporter::fatal("All Notice::PolicyItem priorities must be within 0 and 10");
if ( pi$priority !in tmp )
tmp[pi$priority] = set();
add tmp[pi$priority][pi];
}
local rev_count = vector(10,9,8,7,6,5,4,3,2,1,0);
for ( i in rev_count )
{
local j = rev_count[i];
if ( j in tmp )
{
for ( pi in tmp[j] )
{
pi$position = |ordered_policy|;
ordered_policy[|ordered_policy|] = pi;
Log::write(Notice::POLICY_LOG, pi);
}
}
}
}
function internal_NOTICE(n: Notice::Info)
{
# Suppress this notice if necessary.
if ( is_being_suppressed(n) )
return;
# Fill out fields that might be empty and do the policy processing.
apply_policy(n);
# Run the synchronous functions with the notice.
for ( func in sync_functions )
func(n);
# Generate the notice event with the notice.
event Notice::notice(n);
hook Notice::notice(n);
}
module GLOBAL;
## This is the entry point in the global namespace for notice framework.
function NOTICE(n: Notice::Info)
{
Notice::internal_NOTICE(n);
}
global NOTICE: function(n: Notice::Info);


@ -0,0 +1,14 @@
@load ./main
module GLOBAL;
## This is the entry point in the global namespace for the notice framework.
function NOTICE(n: Notice::Info)
{
# Suppress this notice if necessary.
if ( Notice::is_being_suppressed(n) )
return;
Notice::internal_NOTICE(n);
}


@ -1,10 +1,15 @@
##! This framework is intended to create an output and filtering path for
##! internal messages/warnings/errors. It should typically be loaded to
##! avoid Bro spewing internal messages to standard error and instead log
##! them to a file in a standard way. Note that this framework deals with
##! the handling of internally-generated reporter messages, for the
##! interface into actually creating reporter messages from the scripting
##! layer, use the built-in functions in :doc:`/scripts/base/reporter.bif`.
##! log such messages to a file in a standard way. For the options to
##! toggle whether messages are additionally written to STDERR, see
##! :bro:see:`Reporter::info_to_stderr`,
##! :bro:see:`Reporter::warnings_to_stderr`, and
##! :bro:see:`Reporter::errors_to_stderr`.
##!
##! Note that this framework deals with the handling of internally generated
##! reporter messages; for the interface into actually creating reporter
##! messages from the scripting layer, use the built-in functions in
##! :doc:`/scripts/base/reporter.bif`.
module Reporter;
@ -36,26 +41,11 @@ export {
## Not all reporter messages will have locations in them though.
location: string &log &optional;
};
## Tunable for sending reporter warning messages to STDERR. The option to
## turn it off is presented here in case Bro is being run by some
## external harness and shouldn't output anything to the console.
const warnings_to_stderr = T &redef;
## Tunable for sending reporter error messages to STDERR. The option to
## turn it off is presented here in case Bro is being run by some
## external harness and shouldn't output anything to the console.
const errors_to_stderr = T &redef;
}
global stderr: file;
event bro_init() &priority=5
{
Log::create_stream(Reporter::LOG, [$columns=Info]);
if ( errors_to_stderr || warnings_to_stderr )
stderr = open("/dev/stderr");
}
event reporter_info(t: time, msg: string, location: string) &priority=-5
@ -65,26 +55,10 @@ event reporter_info(t: time, msg: string, location: string) &priority=-5
event reporter_warning(t: time, msg: string, location: string) &priority=-5
{
if ( warnings_to_stderr )
{
if ( t > double_to_time(0.0) )
print stderr, fmt("WARNING: %.6f %s (%s)", t, msg, location);
else
print stderr, fmt("WARNING: %s (%s)", msg, location);
}
Log::write(Reporter::LOG, [$ts=t, $level=WARNING, $message=msg, $location=location]);
}
event reporter_error(t: time, msg: string, location: string) &priority=-5
{
if ( errors_to_stderr )
{
if ( t > double_to_time(0.0) )
print stderr, fmt("ERROR: %.6f %s (%s)", t, msg, location);
else
print stderr, fmt("ERROR: %s (%s)", msg, location);
}
Log::write(Reporter::LOG, [$ts=t, $level=ERROR, $message=msg, $location=location]);
}


@ -148,7 +148,7 @@ function has_signature_matched(id: string, orig: addr, resp: addr): bool
event sig_summary(orig: addr, id: string, msg: string)
{
NOTICE([$note=Signature_Summary, $src=orig,
$filename=id, $msg=fmt("%s: %s", orig, msg),
$msg=fmt("%s: %s", orig, msg),
$n=count_per_orig[orig,id] ]);
}
@ -161,7 +161,7 @@ event signature_match(state: signature_state, msg: string, data: string)
return;
# Trim the matched data down to something reasonable
if ( byte_len(data) > 140 )
if ( |data| > 140 )
data = fmt("%s...", sub_bytes(data, 0, 140));
local src_addr: addr;
@ -209,7 +209,6 @@ event signature_match(state: signature_state, msg: string, data: string)
{
NOTICE([$note=Count_Signature, $conn=state$conn,
$msg=msg,
$filename=sig_id,
$n=count_per_resp[dst,sig_id],
$sub=fmt("%d matches of signature %s on host %s",
count_per_resp[dst,sig_id],
@ -240,7 +239,7 @@ event signature_match(state: signature_state, msg: string, data: string)
if ( notice )
NOTICE([$note=Sensitive_Signature,
$conn=state$conn, $src=src_addr,
$dst=dst_addr, $filename=sig_id, $msg=fmt("%s: %s", src_addr, msg),
$dst=dst_addr, $msg=fmt("%s: %s", src_addr, msg),
$sub=data]);
if ( action == SIG_FILE_BUT_NO_SCAN || action == SIG_SUMMARY )
@ -260,8 +259,8 @@ event signature_match(state: signature_state, msg: string, data: string)
add vert_table[orig, resp][sig_id];
local hcount = length(horiz_table[orig, sig_id]);
local vcount = length(vert_table[orig, resp]);
local hcount = |horiz_table[orig, sig_id]|;
local vcount = |vert_table[orig, resp]|;
if ( hcount in horiz_scan_thresholds && hcount != last_hthresh[orig] )
{
@ -274,7 +273,7 @@ event signature_match(state: signature_state, msg: string, data: string)
$src_addr=orig, $sig_id=sig_id, $event_msg=msg,
$host_count=hcount, $sub_msg=horz_scan_msg]);
NOTICE([$note=Multiple_Sig_Responders, $src=orig, $filename=sig_id,
NOTICE([$note=Multiple_Sig_Responders, $src=orig,
$msg=msg, $n=hcount, $sub=horz_scan_msg]);
last_hthresh[orig] = hcount;
@ -295,7 +294,6 @@ event signature_match(state: signature_state, msg: string, data: string)
$sub_msg=vert_scan_msg]);
NOTICE([$note=Multiple_Signatures, $src=orig, $dst=resp,
$filename=sig_id,
$msg=fmt("%s different signatures triggered", vcount),
$n=vcount, $sub=vert_scan_msg]);


@ -88,10 +88,10 @@ redef dpd_config += { [ANALYZER_AYIYA] = [$ports = ayiya_ports] };
const teredo_ports = { 3544/udp };
redef dpd_config += { [ANALYZER_TEREDO] = [$ports = teredo_ports] };
const gtpv1u_ports = { 2152/udp };
redef dpd_config += { [ANALYZER_GTPV1] = [$ports = gtpv1u_ports] };
const gtpv1_ports = { 2152/udp, 2123/udp };
redef dpd_config += { [ANALYZER_GTPV1] = [$ports = gtpv1_ports] };
redef likely_server_ports += { ayiya_ports, teredo_ports, gtpv1u_ports };
redef likely_server_ports += { ayiya_ports, teredo_ports, gtpv1_ports };
event bro_init() &priority=5
{


@ -240,7 +240,7 @@ export {
## The 4-tuple of the encapsulating "connection". In case of an IP-in-IP
## tunnel the ports will be set to 0. The direction (i.e., orig and
## resp) are set according to the first tunneled packet seen
## and not according to the side that established the tunnel.
## and not according to the side that established the tunnel.
cid: conn_id;
## The type of tunnel.
tunnel_type: Tunnel::Type;
@ -1488,6 +1488,146 @@ type gtpv1_hdr: record {
next_type: count &optional;
};
type gtp_cause: count;
type gtp_imsi: count;
type gtp_teardown_ind: bool;
type gtp_nsapi: count;
type gtp_recovery: count;
type gtp_teid1: count;
type gtp_teid_control_plane: count;
type gtp_charging_id: count;
type gtp_charging_gateway_addr: addr;
type gtp_trace_reference: count;
type gtp_trace_type: count;
type gtp_tft: string;
type gtp_trigger_id: string;
type gtp_omc_id: string;
type gtp_reordering_required: bool;
type gtp_proto_config_options: string;
type gtp_charging_characteristics: count;
type gtp_selection_mode: count;
type gtp_access_point_name: string;
type gtp_msisdn: string;
type gtp_gsn_addr: record {
## If the GSN Address information element has length 4 or 16, then this
## field is set to be the informational element's value interpreted as
## an IPv4 or IPv6 address, respectively.
ip: addr &optional;
## This field is set if it's not an IPv4 or IPv6 address.
other: string &optional;
};
type gtp_end_user_addr: record {
pdp_type_org: count;
pdp_type_num: count;
## Set if the End User Address information element is IPv4/IPv6.
pdp_ip: addr &optional;
## Set if the End User Address information element isn't IPv4/IPv6.
pdp_other_addr: string &optional;
};
type gtp_rai: record {
mcc: count;
mnc: count;
lac: count;
rac: count;
};
type gtp_qos_profile: record {
priority: count;
data: string;
};
type gtp_private_extension: record {
id: count;
value: string;
};
type gtp_create_pdp_ctx_request_elements: record {
imsi: gtp_imsi &optional;
rai: gtp_rai &optional;
recovery: gtp_recovery &optional;
select_mode: gtp_selection_mode &optional;
data1: gtp_teid1;
cp: gtp_teid_control_plane &optional;
nsapi: gtp_nsapi;
linked_nsapi: gtp_nsapi &optional;
charge_character: gtp_charging_characteristics &optional;
trace_ref: gtp_trace_reference &optional;
trace_type: gtp_trace_type &optional;
end_user_addr: gtp_end_user_addr &optional;
ap_name: gtp_access_point_name &optional;
opts: gtp_proto_config_options &optional;
signal_addr: gtp_gsn_addr;
user_addr: gtp_gsn_addr;
msisdn: gtp_msisdn &optional;
qos_prof: gtp_qos_profile;
tft: gtp_tft &optional;
trigger_id: gtp_trigger_id &optional;
omc_id: gtp_omc_id &optional;
ext: gtp_private_extension &optional;
};
type gtp_create_pdp_ctx_response_elements: record {
cause: gtp_cause;
reorder_req: gtp_reordering_required &optional;
recovery: gtp_recovery &optional;
data1: gtp_teid1 &optional;
cp: gtp_teid_control_plane &optional;
charging_id: gtp_charging_id &optional;
end_user_addr: gtp_end_user_addr &optional;
opts: gtp_proto_config_options &optional;
cp_addr: gtp_gsn_addr &optional;
user_addr: gtp_gsn_addr &optional;
qos_prof: gtp_qos_profile &optional;
charge_gateway: gtp_charging_gateway_addr &optional;
ext: gtp_private_extension &optional;
};
type gtp_update_pdp_ctx_request_elements: record {
imsi: gtp_imsi &optional;
rai: gtp_rai &optional;
recovery: gtp_recovery &optional;
data1: gtp_teid1;
cp: gtp_teid_control_plane &optional;
nsapi: gtp_nsapi;
trace_ref: gtp_trace_reference &optional;
trace_type: gtp_trace_type &optional;
cp_addr: gtp_gsn_addr;
user_addr: gtp_gsn_addr;
qos_prof: gtp_qos_profile;
tft: gtp_tft &optional;
trigger_id: gtp_trigger_id &optional;
omc_id: gtp_omc_id &optional;
ext: gtp_private_extension &optional;
end_user_addr: gtp_end_user_addr &optional;
};
type gtp_update_pdp_ctx_response_elements: record {
cause: gtp_cause;
recovery: gtp_recovery &optional;
data1: gtp_teid1 &optional;
cp: gtp_teid_control_plane &optional;
charging_id: gtp_charging_id &optional;
cp_addr: gtp_gsn_addr &optional;
user_addr: gtp_gsn_addr &optional;
qos_prof: gtp_qos_profile &optional;
charge_gateway: gtp_charging_gateway_addr &optional;
ext: gtp_private_extension &optional;
};
type gtp_delete_pdp_ctx_request_elements: record {
teardown_ind: gtp_teardown_ind &optional;
nsapi: gtp_nsapi;
ext: gtp_private_extension &optional;
};
type gtp_delete_pdp_ctx_response_elements: record {
cause: gtp_cause;
ext: gtp_private_extension &optional;
};
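These records carry the decoded information elements of the GTPv1 control messages. As an illustration only (the event name and signature below are assumptions and may not match the analyzer exactly), a handler could inspect a PDP context request roughly like this:

event gtpv1_create_pdp_ctx_request(c: connection, hdr: gtpv1_hdr,
                                   elems: gtp_create_pdp_ctx_request_elements)
	{
	if ( elems?$end_user_addr && elems$end_user_addr?$pdp_ip )
		print fmt("PDP context requested, end user address %s",
		          elems$end_user_addr$pdp_ip);
	}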
## Definition of "secondary filters". A secondary filter is a BPF filter given as
## index in this table. For each such filter, the corresponding event is raised for
## all matching packets.
@ -2507,7 +2647,7 @@ type ModbusHeaders: record {
module SOCKS;
export {
## This record is for a SOCKS client or server to provide either a
## This record is for a SOCKS client or server to provide either a
## name or an address to represent a desired or established connection.
type Address: record {
host: addr &optional;
@ -2608,6 +2748,15 @@ const gap_report_freq = 1.0 sec &redef;
## .. bro:see:: content_gap gap_report partial_connection
const report_gaps_for_partial = F &redef;
## Flag to prevent Bro from exiting automatically when input is exhausted.
## Normally Bro terminates when all packet sources have gone dry
## and communication isn't enabled. If this flag is set, Bro's main loop will
## instead keep idling until :bro:see:`terminate` is explicitly called.
##
## This is mainly for testing purposes when termination behaviour needs to be
## controlled for reproducing results.
const exit_only_after_terminate = F &redef;
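For example, a test harness that wants Bro to keep running until the test decides it is done would redef the flag and call terminate() itself; the event name below is hypothetical and would be raised by the test script when its work is complete.

redef exit_only_after_terminate = T;

global my_test_done: event();

event my_test_done()
	{
	terminate();
	}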
## The CA certificate file to authorize remote Bros/Broccolis.
##
## .. bro:see:: ssl_private_key ssl_passphrase
@ -2857,6 +3006,25 @@ export {
} # end export
module GLOBAL;
module Reporter;
export {
## Tunable for sending reporter info messages to STDERR. The option to
## turn it off is presented here in case Bro is being run by some
## external harness and shouldn't output anything to the console.
const info_to_stderr = T &redef;
## Tunable for sending reporter warning messages to STDERR. The option to
## turn it off is presented here in case Bro is being run by some
## external harness and shouldn't output anything to the console.
const warnings_to_stderr = T &redef;
## Tunable for sending reporter error messages to STDERR. The option to
## turn it off is presented here in case Bro is being run by some
## external harness and shouldn't output anything to the console.
const errors_to_stderr = T &redef;
}
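A hedged example of how an external harness (or a site's local.bro) would silence console output from the reporter while still writing reporter.log:

redef Reporter::info_to_stderr = F;
redef Reporter::warnings_to_stderr = F;
redef Reporter::errors_to_stderr = F;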
module GLOBAL;
## Number of bytes per packet to capture from live interfaces.
const snaplen = 8192 &redef;


@ -73,7 +73,7 @@ event http_message_done(c: connection, is_orig: bool, stat: http_message_stat) &
delete c$http$md5_handle;
NOTICE([$note=MD5, $msg=fmt("%s %s %s", c$id$orig_h, c$http$md5, url),
$sub=c$http$md5, $conn=c, $URL=url]);
$sub=c$http$md5, $conn=c]);
}
}


@ -68,9 +68,7 @@ event signature_match(state: signature_state, msg: string, data: string) &priori
local message = fmt("%s %s %s", msg, c$http$method, url);
NOTICE([$note=Incorrect_File_Type,
$msg=message,
$conn=c,
$method=c$http$method,
$URL=url]);
$conn=c]);
}
}


@ -67,7 +67,7 @@ event socks_request(c: connection, version: count, request_type: count,
# proxied connection. We treat this as a singular "tunnel".
local cid = copy(c$id);
cid$orig_p = 0/tcp;
Tunnel::register([$cid=cid, $tunnel_type=Tunnel::SOCKS, $payload_proxy=T]);
Tunnel::register([$cid=cid, $tunnel_type=Tunnel::SOCKS]);
}
event socks_reply(c: connection, version: count, reply: count, sa: SOCKS::Address, p: port) &priority=5


@ -1,3 +1,3 @@
@load ./consts
@load ./main
@load ./mozilla-ca-list
@load ./mozilla-ca-list


@ -72,6 +72,17 @@ export {
## utility.
const openssl_util = "openssl" &redef;
## The maximum amount of time a script can delay records from being logged.
const max_log_delay = 15secs &redef;
## Delays an SSL record for a specific token: the record will not be logged
## as long as the token exists or until :bro:id:`SSL::max_log_delay` elapses.
global delay_log: function(info: Info, token: string);
## Undelays an SSL record for a previously inserted token, allowing the
## record to be logged.
global undelay_log: function(info: Info, token: string);
## Event that can be handled to access the SSL
## record as it is sent on to the logging framework.
global log_ssl: event(rec: Info);
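A hedged usage sketch for delay_log/undelay_log (the token name and the DNS lookup are illustrative): hold the ssl.log entry back while an asynchronous operation completes, then release it; if the token is never released, the record is flushed anyway after SSL::max_log_delay.

event ssl_established(c: connection)
	{
	local info = c$ssl;
	SSL::delay_log(info, "rdns");

	when ( local name = lookup_addr(c$id$resp_h) )
		{
		# ... attach name to info here ...
		SSL::undelay_log(info, "rdns");
		}
	}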
@ -81,6 +92,13 @@ redef record connection += {
ssl: Info &optional;
};
redef record Info += {
# Adding a string "token" to this set will cause the SSL script
# to delay logging the record until either the token has been removed or
# the record has been delayed for :bro:id:`SSL::max_log_delay`.
delay_tokens: set[string] &optional;
};
event bro_init() &priority=5
{
Log::create_stream(SSL::LOG, [$columns=Info, $ev=log_ssl]);
@ -115,6 +133,13 @@ redef likely_server_ports += {
989/tcp, 990/tcp, 992/tcp, 993/tcp, 995/tcp, 5223/tcp
};
# A queue that buffers log records.
global log_delay_queue: table[count] of Info;
# The top queue index where records are added.
global log_delay_queue_head = 0;
# The bottom queue index that points to the next record to be flushed.
global log_delay_queue_tail = 0;
function set_session(c: connection)
{
if ( ! c?$ssl )
@ -122,12 +147,65 @@ function set_session(c: connection)
$client_cert_chain=vector()];
}
function delay_log(info: Info, token: string)
{
info$delay_tokens = set();
add info$delay_tokens[token];
log_delay_queue[log_delay_queue_head] = info;
++log_delay_queue_head;
}
function undelay_log(info: Info, token: string)
{
if ( token in info$delay_tokens )
delete info$delay_tokens[token];
}
global log_record: function(info: Info);
event delay_logging(info: Info)
{
log_record(info);
}
function log_record(info: Info)
{
if ( ! info?$delay_tokens || |info$delay_tokens| == 0 )
{
Log::write(SSL::LOG, info);
}
else
{
for ( unused_index in log_delay_queue )
{
if ( log_delay_queue_head == log_delay_queue_tail )
return;
if ( |log_delay_queue[log_delay_queue_tail]$delay_tokens| > 0 )
{
if ( info$ts + max_log_delay > network_time() )
{
schedule 1sec { delay_logging(info) };
return;
}
else
{
Reporter::info(fmt("SSL delay tokens not released in time (%s)",
info$delay_tokens));
}
}
Log::write(SSL::LOG, log_delay_queue[log_delay_queue_tail]);
delete log_delay_queue[log_delay_queue_tail];
++log_delay_queue_tail;
}
}
}
function finish(c: connection)
{
Log::write(SSL::LOG, c$ssl);
log_record(c$ssl);
if ( disable_analyzer_after_detection && c?$ssl && c$ssl?$analyzer_id )
disable_analyzer(c$id, c$ssl$analyzer_id);
delete c$ssl;
}
event ssl_client_hello(c: connection, version: count, possible_ts: time, session_id: string, ciphers: count_set) &priority=5
@ -228,3 +306,15 @@ event protocol_violation(c: connection, atype: count, aid: count,
if ( c?$ssl )
finish(c);
}
event bro_done()
{
if ( |log_delay_queue| == 0 )
return;
for ( unused_index in log_delay_queue )
{
Log::write(SSL::LOG, log_delay_queue[log_delay_queue_tail]);
delete log_delay_queue[log_delay_queue_tail];
++log_delay_queue_tail;
}
}


@ -27,7 +27,7 @@ function compress_path(dir: string): string
const cdup_sep = /((\/)*([^\/]|\\\/)+)?((\/)+\.\.(\/)*)/;
local parts = split_n(dir, cdup_sep, T, 1);
if ( length(parts) > 1 )
if ( |parts| > 1 )
{
# reaching a point with two parent dir references back-to-back means
# we don't know about anything higher in the tree to pop off


@ -6,7 +6,7 @@
## characters.
function is_string_binary(s: string): bool
{
return byte_len(gsub(s, /[\x00-\x7f]/, "")) * 100 / |s| >= 25;
return |gsub(s, /[\x00-\x7f]/, "")| * 100 / |s| >= 25;
}
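For clarity, the function treats a string as binary when at least a quarter of its bytes fall outside the ASCII range; for example:

event bro_init()
	{
	print is_string_binary("plain ascii text");        # F (no 8-bit bytes)
	print is_string_binary("\xff\xfebinary\xff\xff");  # T (4 of 10 bytes are 8-bit)
	}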
## Joins a set of strings together, with elements delimited by a constant string.