Remove trailing whitespace from script files

This commit is contained in:
Tim Wojtulewicz 2021-10-20 09:55:11 -07:00
parent 303e84ad86
commit a6378531db
78 changed files with 310 additions and 325 deletions

View file

@ -2,7 +2,7 @@
##!
##! The manager is passive (the workers connect to us), and once connected
##! the manager registers for the events on the workers that are needed
##! to get the desired data from the workers. This script will be
##! to get the desired data from the workers. This script will be
##! automatically loaded if necessary based on the type of node being started.
##! This is where the cluster manager sets its specific settings for other

View file

@ -364,7 +364,7 @@ event zeek_init() &priority=-5
if ( manager_is_logger )
{
local mgr = nodes_with_type(Cluster::MANAGER);
if ( |mgr| > 0 )
{
local eln = pool_eligibility[Cluster::LOGGER]$eligible_nodes;
@ -438,7 +438,7 @@ event zeek_init() &priority=-5
pet = pool_eligibility[pool$spec$node_type];
local nodes_to_init = |pet$eligible_nodes|;
if ( pool$spec?$max_nodes &&
pool$spec$max_nodes < |pet$eligible_nodes| )
nodes_to_init = pool$spec$max_nodes;

View file

@ -35,7 +35,7 @@ export {
## Number of protocol violations to tolerate before disabling an analyzer.
option max_violations: table[Analyzer::Tag] of count = table() &default = 5;
## Analyzers which you don't want to throw
## Analyzers which you don't want to throw
option ignore_violations: set[Analyzer::Tag] = set();
## Ignore violations which go this many bytes into the connection.

View file

@ -252,7 +252,7 @@ signature file-mpqgame {
file-magic /^MPQ\x1a/
}
# Blizzard CASC Format game file
# Blizzard CASC Format game file
signature file-blizgame {
file-mime "application/x-blizgame", 100
file-magic /^BLTE/
@ -302,4 +302,3 @@ signature file-iso9660 {
file-mime "application/x-iso9660-image", 99
file-magic /CD001/
}

View file

@ -1,7 +1,6 @@
# This signature is non-specific and terrible but after
# searching for a long time there doesn't seem to be a
# better option.
# searching for a long time there doesn't seem to be a
# better option.
signature file-msword {
file-magic /^\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1/
file-mime "application/msword", 50

View file

@ -104,7 +104,7 @@ export {
missing_bytes: count &log &default=0;
## The number of bytes in the file stream that were not delivered to
## stream file analyzers. This could be overlapping bytes or
## stream file analyzers. This could be overlapping bytes or
## bytes that couldn't be reassembled.
overflow_bytes: count &log &default=0;
@ -150,7 +150,7 @@ export {
## f: the file.
global enable_reassembly: function(f: fa_file);
## Disables the file reassembler on this file. If the file is not
## Disables the file reassembler on this file. If the file is not
## transferred out of order this will have no effect.
##
## f: the file.
@ -266,7 +266,7 @@ export {
};
## Register callbacks for protocols that work with the Files framework.
## The callbacks must uniquely identify a file and each protocol can
## The callbacks must uniquely identify a file and each protocol can
## only have a single callback registered for it.
##
## tag: Tag for the protocol analyzer having a callback being registered.
@ -280,7 +280,7 @@ export {
## manipulation when they are being added to a file before the core code
## takes over. This is unlikely to be interesting for users and should
## only be called by file analyzer authors but is *not required*.
##
##
## tag: Tag for the file analyzer.
##
## callback: Function to execute when the given file analyzer is being added.

View file

@ -49,7 +49,7 @@ export {
## A URL for more information about the data.
url: string &optional;
};
## Represents a piece of intelligence.
type Item: record {
## The intelligence indicator.
@ -57,12 +57,12 @@ export {
## The type of data that the indicator field represents.
indicator_type: Type;
## Metadata for the item. Typically represents more deeply
## descriptive data for a piece of intelligence.
meta: MetaData;
};
## Enum to represent where data came from when it was discovered.
## The convention is to prefix the name with ``IN_``.
type Where: enum {
@ -158,8 +158,8 @@ export {
global extend_match: hook(info: Info, s: Seen, items: set[Item]);
## The expiration timeout for intelligence items. Once an item expires, the
## :zeek:id:`Intel::item_expired` hook is called. Reinsertion of an item
## resets the timeout. A negative value disables expiration of intelligence
## :zeek:id:`Intel::item_expired` hook is called. Reinsertion of an item
## resets the timeout. A negative value disables expiration of intelligence
## items.
const item_expiration = -1 min &redef;

View file

@ -41,7 +41,7 @@ export {
name: function(state: PluginState) : string;
## If true, plugin can expire rules itself. If false, the NetControl
## framework will manage rule expiration.
## framework will manage rule expiration.
can_expire: bool;
## One-time initialization function called when plugin gets registered, and

View file

@ -46,7 +46,7 @@ function debug_add_rule(p: PluginState, r: Rule) : bool
local s = fmt("add_rule: %s", r);
debug_log(p, s);
if ( do_something(p) )
if ( do_something(p) )
{
event NetControl::rule_added(r, p);
return T;
@ -76,12 +76,10 @@ global debug_plugin = Plugin(
function create_debug(do_something: bool) : PluginState
{
local p: PluginState = [$plugin=debug_plugin];
# FIXME: Why's the default not working?
p$config = table();
p$config["all"] = (do_something ? "1" : "0");
return p;
}

View file

@ -1,7 +1,7 @@
##! NetControl plugin for the process-level PacketFilter that comes with
##! Zeek. Since the PacketFilter in Zeek is quite limited in scope
##! and can only add/remove filters for addresses, this is quite
##! limited in scope at the moment.
##! limited in scope at the moment.
@load ../plugin
@ -110,4 +110,3 @@ function create_packetfilter() : PluginState
return p;
}

View file

@ -1,7 +1,7 @@
##! This file defines the types that are used by the NetControl framework.
##!
##! The most important type defined in this file is :zeek:see:`NetControl::Rule`,
##! which is used to describe all rules that can be expressed by the NetControl framework.
##! which is used to describe all rules that can be expressed by the NetControl framework.
module NetControl;

View file

@ -1,6 +1,6 @@
##! This script adds geographic location data to notices for the "remote"
##! host in a connection. It does make the assumption that one of the
##! addresses in a connection is "local" and one is "remote" which is
##! host in a connection. It does make the assumption that one of the
##! addresses in a connection is "local" and one is "remote" which is
##! probably a safe assumption to make in most cases. If both addresses
##! are remote, it will use the $src address.
@ -17,13 +17,13 @@ export {
## in order for this to work.
ACTION_ADD_GEODATA
};
redef record Info += {
## If GeoIP support is built in, notices can have geographic
## information attached to them.
remote_location: geo_location &log &optional;
};
## Notice types which should have the "remote" location looked up.
## If GeoIP support is not built in, this does nothing.
option lookup_location_types: set[Notice::Type] = {};
@ -35,7 +35,7 @@ hook policy(n: Notice::Info) &priority=10
add n$actions[ACTION_ADD_GEODATA];
}
# This is handled at a high priority in case other notice handlers
# This is handled at a high priority in case other notice handlers
# want to use the data.
hook notice(n: Notice::Info) &priority=10
{

View file

@ -10,9 +10,9 @@ module Notice;
export {
redef enum Action += {
## Indicate that the generated email should be addressed to the
## Indicate that the generated email should be addressed to the
## appropriate email addresses as found by the
## :zeek:id:`Site::get_emails` function based on the relevant
## :zeek:id:`Site::get_emails` function based on the relevant
## address or addresses indicated in the notice.
ACTION_EMAIL_ADMIN
};

View file

@ -112,12 +112,12 @@ function lookup_controller(name: string): vector of Controller
if ( Cluster::local_node_type() != Cluster::MANAGER )
return vector();
# I am not quite sure if we can actually get away with this - in the
# I am not quite sure if we can actually get away with this - in the
# current state, this means that the individual nodes cannot lookup
# a controller by name.
#
# This means that there can be no reactions to things on the actual
# worker nodes - because they cannot look up a name. On the other hand -
# worker nodes - because they cannot look up a name. On the other hand -
# currently we also do not even send the events to the worker nodes (at least
# not if we are using broker). Because of that I am not really feeling that
# badly about it...

View file

@ -60,7 +60,7 @@ export {
SIG_ALARM_PER_ORIG,
## Alarm once and then never again.
SIG_ALARM_ONCE,
## Count signatures per responder host and alarm with the
## Count signatures per responder host and alarm with the
## :zeek:enum:`Signatures::Count_Signature` notice if a threshold
## defined by :zeek:id:`Signatures::count_thresholds` is reached.
SIG_COUNT_PER_RESP,
@ -100,15 +100,15 @@ export {
## Number of hosts, from a summary count.
host_count: count &log &optional;
};
## Actions for a signature.
## Actions for a signature.
const actions: table[string] of Action = {
["unspecified"] = SIG_IGNORE, # place-holder
} &redef &default = SIG_ALARM;
## Signature IDs that should always be ignored.
option ignored_ids = /NO_DEFAULT_MATCHES/;
## Generate a notice if, for a pair [orig, signature], the number of
## different responders has reached one of the thresholds.
const horiz_scan_thresholds = { 5, 10, 50, 100, 500, 1000 } &redef;
@ -120,7 +120,7 @@ export {
## Generate a notice if a :zeek:enum:`Signatures::SIG_COUNT_PER_RESP`
## signature is triggered as often as given by one of these thresholds.
const count_thresholds = { 5, 10, 50, 100, 500, 1000, 10000, 1000000, } &redef;
## The interval between when :zeek:enum:`Signatures::Signature_Summary`
## notices are generated.
option summary_interval = 1 day;
@ -147,7 +147,7 @@ event zeek_init() &priority=5
{
Log::create_stream(Signatures::LOG, [$columns=Info, $ev=log_signature, $path="signatures", $policy=log_policy]);
}
# Returns true if the given signature has already been triggered for the given
# [orig, resp] pair.
function has_signature_matched(id: string, orig: addr, resp: addr): bool
@ -173,7 +173,7 @@ event signature_match(state: signature_state, msg: string, data: string)
# Trim the matched data down to something reasonable
if ( |data| > 140 )
data = fmt("%s...", sub_bytes(data, 0, 140));
local src_addr: addr;
local src_port: port;
local dst_addr: addr;
@ -212,7 +212,7 @@ event signature_match(state: signature_state, msg: string, data: string)
local notice = F;
if ( action == SIG_ALARM )
notice = T;
if ( action == SIG_COUNT_PER_RESP )
{
local dst = state$conn$id$resp_h;
@ -252,7 +252,7 @@ event signature_match(state: signature_state, msg: string, data: string)
$conn=state$conn, $src=src_addr,
$dst=dst_addr, $msg=fmt("%s: %s", src_addr, msg),
$sub=data]);
if ( action == SIG_FILE_BUT_NO_SCAN || action == SIG_SUMMARY )
return;
@ -279,7 +279,7 @@ event signature_match(state: signature_state, msg: string, data: string)
fmt("%s has triggered signature %s on %d hosts",
orig, sig_id, hcount);
Log::write(Signatures::LOG,
Log::write(Signatures::LOG,
[$ts=network_time(), $note=Multiple_Sig_Responders,
$src_addr=orig, $sig_id=sig_id, $event_msg=msg,
$host_count=hcount, $sub_msg=horz_scan_msg]);
@ -296,9 +296,9 @@ event signature_match(state: signature_state, msg: string, data: string)
fmt("%s has triggered %d different signatures on host %s",
orig, vcount, resp);
Log::write(Signatures::LOG,
Log::write(Signatures::LOG,
[$ts=network_time(),
$note=Multiple_Signatures,
$note=Multiple_Signatures,
$src_addr=orig,
$dst_addr=resp, $sig_id=sig_id, $sig_count=vcount,
$event_msg=fmt("%s different signatures triggered", vcount),
@ -311,4 +311,3 @@ event signature_match(state: signature_state, msg: string, data: string)
last_vthresh[orig] = vcount;
}
}

View file

@ -13,18 +13,18 @@ module Software;
export {
## The software logging stream identifier.
redef enum Log::ID += { LOG };
## A default logging policy hook for the stream.
global log_policy: Log::PolicyHook;
## Scripts detecting new types of software need to redef this enum to add
## their own specific software types which would then be used when they
## their own specific software types which would then be used when they
## create :zeek:type:`Software::Info` records.
type Type: enum {
## A placeholder type for when the type of software is not known.
UNKNOWN,
};
## A structure to represent the numeric version of software.
type Version: record {
## Major version number.
@ -38,7 +38,7 @@ export {
## Additional version string (e.g. "beta42").
addl: string &optional;
} &log;
## The record type that is used for representing and logging software.
type Info: record {
## The time at which the software was detected.
@ -58,9 +58,9 @@ export {
## parsing doesn't always work reliably in all cases and this
## acts as a fallback in the logs.
unparsed_version: string &log &optional;
## This can indicate that this software being detected should
## definitely be sent onward to the logging framework. By
## definitely be sent onward to the logging framework. By
## default, only software that is "interesting" due to a change
## in version or it being currently unknown is sent to the
## logging framework. This can be set to T to force the record
@ -68,7 +68,7 @@ export {
## tracking needs to happen in a specific way to the software.
force_log: bool &default=F;
};
## Hosts whose software should be detected and tracked.
## Choices are: LOCAL_HOSTS, REMOTE_HOSTS, ALL_HOSTS, NO_HOSTS.
option asset_tracking = LOCAL_HOSTS;
@ -78,21 +78,21 @@ export {
## id: The connection id where the software was discovered.
##
## info: A record representing the software discovered.
##
##
## Returns: T if the software was logged, F otherwise.
global found: function(id: conn_id, info: Info): bool;
## Compare two version records.
##
##
## Returns: -1 for v1 < v2, 0 for v1 == v2, 1 for v1 > v2.
## If the numerical version numbers match, the *addl* string
## is compared lexicographically.
global cmp_versions: function(v1: Version, v2: Version): int;
## Sometimes software will expose itself on the network with
## slight naming variations. This table provides a mechanism
## for a piece of software to be renamed to a single name
## even if it exposes itself with an alternate name. The
## Sometimes software will expose itself on the network with
## slight naming variations. This table provides a mechanism
## for a piece of software to be renamed to a single name
## even if it exposes itself with an alternate name. The
## yielded string is the name that will be logged and generally
## used for everything.
global alternate_names: table[string] of string {
@ -100,17 +100,17 @@ export {
} &default=function(a: string): string { return a; };
## Type to represent a collection of :zeek:type:`Software::Info` records.
## It's indexed with the name of a piece of software such as "Firefox"
## It's indexed with the name of a piece of software such as "Firefox"
## and it yields a :zeek:type:`Software::Info` record with more
## information about the software.
type SoftwareSet: table[string] of Info;
## The set of software associated with an address. Data expires from
## this table after one day by default so that a detected piece of
## this table after one day by default so that a detected piece of
## software will be logged once each day. In a cluster, this table is
## uniformly distributed among proxy nodes.
global tracked: table[addr] of SoftwareSet &create_expire=1day;
## This event can be handled to access the :zeek:type:`Software::Info`
## record as it is sent on to the logging framework.
global log_software: event(rec: Info);
@ -128,7 +128,7 @@ event zeek_init() &priority=5
{
Log::create_stream(Software::LOG, [$columns=Info, $ev=log_software, $path="software", $policy=log_policy]);
}
type Description: record {
name: string;
version: Version;
@ -138,13 +138,13 @@ type Description: record {
# Defining this here because of a circular dependency between two functions.
global parse_mozilla: function(unparsed_version: string): Description;
# Don't even try to understand this now, just make sure the tests are
# Don't even try to understand this now, just make sure the tests are
# working.
function parse(unparsed_version: string): Description
{
local software_name = "<parse error>";
local v: Version;
# Parse browser-alike versions separately
if ( /^(Mozilla|Opera)\/[0-9]+\./ in unparsed_version )
{
@ -220,10 +220,10 @@ function parse(unparsed_version: string): Description
{
v$addl = strip(version_parts[2]);
}
}
}
if ( 3 in version_numbers && version_numbers[3] != "" )
v$minor3 = extract_count(version_numbers[3]);
if ( 2 in version_numbers && version_numbers[2] != "" )
@ -234,7 +234,7 @@ function parse(unparsed_version: string): Description
v$major = extract_count(version_numbers[0]);
}
}
return [$version=v, $unparsed_version=unparsed_version, $name=alternate_names[software_name]];
}
@ -245,7 +245,7 @@ function parse_with_cache(unparsed_version: string): Description
{
if (unparsed_version in parse_cache)
return parse_cache[unparsed_version];
local res = parse(unparsed_version);
parse_cache[unparsed_version] = res;
return res;
@ -256,7 +256,7 @@ function parse_mozilla(unparsed_version: string): Description
local software_name = "<unknown browser>";
local v: Version;
local parts: string_vec;
if ( /Opera [0-9\.]*$/ in unparsed_version )
{
software_name = "Opera";
@ -349,7 +349,7 @@ function parse_mozilla(unparsed_version: string): Description
if ( 2 in parts )
v = parse(parts[2])$version;
}
else if ( /AdobeAIR\/[0-9\.]*/ in unparsed_version )
{
software_name = "AdobeAIR";
@ -392,7 +392,7 @@ function cmp_versions(v1: Version, v2: Version): int
else
return v1?$major ? 1 : -1;
}
if ( v1?$minor && v2?$minor )
{
if ( v1$minor < v2$minor )
@ -407,7 +407,7 @@ function cmp_versions(v1: Version, v2: Version): int
else
return v1?$minor ? 1 : -1;
}
if ( v1?$minor2 && v2?$minor2 )
{
if ( v1$minor2 < v2$minor2 )
@ -462,7 +462,7 @@ function software_endpoint_name(id: conn_id, host: addr): string
# Convert a version into a string "a.b.c-x".
function software_fmt_version(v: Version): string
{
return fmt("%s%s%s%s%s",
return fmt("%s%s%s%s%s",
v?$major ? fmt("%d", v$major) : "0",
v?$minor ? fmt(".%d", v$minor) : "",
v?$minor2 ? fmt(".%d", v$minor2) : "",
@ -510,10 +510,10 @@ event Software::register(info: Info)
local changed = cmp_versions(old$version, info$version) != 0;
if ( changed )
event Software::version_change(old, info);
event Software::version_change(old, info);
else if ( ! info$force_log )
# If the version hasn't changed, then we're just redetecting the
# same thing, then we don't care.
# same thing, then we don't care.
return;
}
@ -526,7 +526,7 @@ function found(id: conn_id, info: Info): bool
if ( ! info$force_log && ! addr_matches_host(info$host, asset_tracking) )
return F;
if ( ! info?$ts )
if ( ! info?$ts )
info$ts = network_time();
if ( info?$version )

View file

@ -220,7 +220,7 @@ event zeek_init() &priority=100
# This variable is maintained by manager nodes as they collect and aggregate
# results.
# Index on a uid.
global stats_keys: table[string] of set[Key] &read_expire=1min
global stats_keys: table[string] of set[Key] &read_expire=1min
&expire_func=function(s: table[string] of set[Key], idx: string): interval
{
Reporter::warning(fmt("SumStat key request for the %s SumStat uid took longer than 1 minute and was automatically cancelled.", idx));

View file

@ -510,7 +510,7 @@ function check_thresholds(ss: SumStat, key: Key, result: Result, modify_pct: dou
return F;
# Add in the extra ResultVals to make threshold_vals easier to write.
# This length comparison should work because we just need to make
# This length comparison should work because we just need to make
# sure that we have the same number of reducers and results.
if ( |ss$reducers| != |result| )
{
@ -568,4 +568,3 @@ function threshold_crossed(ss: SumStat, key: Key, result: Result)
ss$threshold_crossed(key, result);
}

View file

@ -95,7 +95,7 @@ hook compose_resultvals_hook(result: ResultVal, rv1: ResultVal, rv2: ResultVal)
{
local other_vector: vector of Observation;
local othercount: count;
if ( rv1$sample_elements > rv2$sample_elements )
{
result$samples = copy(rv1$samples);

View file

@ -46,7 +46,7 @@ hook register_observe_plugins()
if ( ! r?$unique_max || |rv$unique_vals| <= r$unique_max )
add rv$unique_vals[obs];
rv$unique = |rv$unique_vals|;
});
}

View file

@ -1,5 +1,5 @@
##! This script can be used to extract either the originator's data or the
##! responders data or both. By default nothing is extracted, and in order
##! This script can be used to extract either the originator's data or the
##! responders data or both. By default nothing is extracted, and in order
##! to actually extract data the ``c$extract_orig`` and/or the
##! ``c$extract_resp`` variable must be set to ``T``. One way to achieve this
##! would be to handle the :zeek:id:`connection_established` event elsewhere
@ -19,7 +19,7 @@ export {
## The prefix given to files containing extracted connections as they
## are opened on disk.
option extraction_prefix = "contents";
## If this variable is set to ``T``, then all contents of all
## connections will be extracted.
option default_extract = F;
@ -38,7 +38,7 @@ event connection_established(c: connection) &priority=-5
local orig_f = open(orig_file);
set_contents_file(c$id, CONTENTS_ORIG, orig_f);
}
if ( c$extract_resp )
{
local resp_file = generate_extraction_filename(extraction_prefix, c, "resp.dat");

View file

@ -10,14 +10,14 @@ export {
# For interactive services, allow longer periods of inactivity.
[[Analyzer::ANALYZER_SSH, Analyzer::ANALYZER_FTP]] = 1 hrs,
};
## Define inactivity timeouts based on common protocol ports.
option port_inactivity_timeouts: table[port] of interval = {
[[21/tcp, 22/tcp, 23/tcp, 513/tcp]] = 1 hrs,
};
}
event protocol_confirmation(c: connection, atype: Analyzer::Tag, aid: count)
{
if ( atype in analyzer_inactivity_timeouts )

View file

@ -17,7 +17,7 @@ export {
## The connection's 4-tuple of endpoint addresses/ports.
id : conn_id &log;
## Round trip time from the request to the response.
## If either the request or response wasn't seen,
## If either the request or response wasn't seen,
## this will be null.
rtt : interval &log &optional;

View file

@ -78,7 +78,7 @@ export {
## The DHCP message types seen by this DHCP transaction
msg_types: vector of string &log &default=string_vec();
## Duration of the DHCP "session" representing the
## Duration of the DHCP "session" representing the
## time from the first message to the last.
duration: interval &log &default=0secs;

View file

@ -375,7 +375,7 @@ hook DNS::do_reply(c: connection, msg: dns_msg, ans: dns_answer, reply: string)
if ( ! c$dns?$rtt )
{
c$dns$rtt = network_time() - c$dns$ts;
# This could mean that only a reply was seen since
# This could mean that only a reply was seen since
# we assume there must be some passage of time between
# request and response.
if ( c$dns$rtt == 0secs )
@ -547,9 +547,9 @@ event dns_SRV_reply(c: connection, msg: dns_msg, ans: dns_answer, target: string
#
# }
# event dns_EDNS_ecs(c: connection, msg: dns_msg, opt: dns_edns_ecs)
# {
#
# }
# {
#
# }
#
#event dns_TSIG_addl(c: connection, msg: dns_msg, ans: dns_tsig_additional)
# {

View file

@ -18,14 +18,14 @@ export {
## Describe the file being transferred.
global describe_file: function(f: fa_file): string;
redef record fa_file += {
redef record fa_file += {
ftp: FTP::Info &optional;
};
}
function get_file_handle(c: connection, is_orig: bool): string
{
if ( [c$id$resp_h, c$id$resp_p] !in ftp_data_expected )
if ( [c$id$resp_h, c$id$resp_p] !in ftp_data_expected )
return "";
return cat(Analyzer::ANALYZER_FTP_DATA, c$start_time, c$id, is_orig);
@ -54,7 +54,7 @@ event zeek_init() &priority=5
event file_over_new_connection(f: fa_file, c: connection, is_orig: bool) &priority=5
{
if ( [c$id$resp_h, c$id$resp_p] !in ftp_data_expected )
if ( [c$id$resp_h, c$id$resp_p] !in ftp_data_expected )
return;
local ftp = ftp_data_expected[c$id$resp_h, c$id$resp_p];

View file

@ -11,12 +11,12 @@ export {
## Counter to track how many commands have been executed.
seq: count &default=0;
};
## Structure for tracking pending commands in the event that the client
## sends a large number of commands before the server has a chance to
## sends a large number of commands before the server has a chance to
## reply.
type PendingCmds: table[count] of CmdArg;
## Possible response codes for a wide variety of FTP commands.
option cmd_reply_code: set[string, count] = {
# According to RFC 959
@ -65,7 +65,7 @@ export {
["MDTM", [213, 500, 501, 550]], # RFC3659
["MLST", [150, 226, 250, 500, 501, 550]], # RFC3659
["MLSD", [150, 226, 250, 500, 501, 550]], # RFC3659
["CLNT", [200, 500]], # No RFC (indicate client software)
["MACB", [200, 500, 550]], # No RFC (test for MacBinary support)
@ -79,11 +79,11 @@ function add_pending_cmd(pc: PendingCmds, cmd: string, arg: string): CmdArg
{
local ca = [$cmd = cmd, $arg = arg, $seq=|pc|+1, $ts=network_time()];
pc[ca$seq] = ca;
return ca;
}
# Determine which is the best command to match with based on the
# Determine which is the best command to match with based on the
# response code and message.
function get_pending_cmd(pc: PendingCmds, reply_code: count, reply_msg: string): CmdArg
{
@ -94,18 +94,18 @@ function get_pending_cmd(pc: PendingCmds, reply_code: count, reply_msg: string):
for ( cmd_seq, cmd in pc )
{
local score: int = 0;
# if the command is compatible with the reply code
# code 500 (syntax error) is compatible with all commands
if ( reply_code == 500 || [cmd$cmd, reply_code] in cmd_reply_code )
score = score + 100;
# if the command or the command arg appears in the reply message
if ( strstr(reply_msg, cmd$cmd) > 0 )
score = score + 20;
if ( strstr(reply_msg, cmd$arg) > 0 )
score = score + 10;
if ( score > best_score ||
( score == best_score && best_seq > cmd_seq ) ) # break tie with sequence number
{
@ -132,7 +132,7 @@ function remove_pending_cmd(pc: PendingCmds, ca: CmdArg): bool
else
return F;
}
function pop_pending_cmd(pc: PendingCmds, reply_code: count, reply_msg: string): CmdArg
{
local ca = get_pending_cmd(pc, reply_code, reply_msg);

View file

@ -97,7 +97,7 @@ event http_header(c: connection, is_orig: bool, name: string, value: string) &pr
event file_over_new_connection(f: fa_file, c: connection, is_orig: bool) &priority=5
{
if ( f$source == "HTTP" && c?$http )
if ( f$source == "HTTP" && c?$http )
{
f$http = c$http;
@ -199,6 +199,6 @@ event file_sniff(f: fa_file, meta: fa_metadata) &priority=5
event http_end_entity(c: connection, is_orig: bool) &priority=5
{
if ( c?$http && c$http?$current_entity )
if ( c?$http && c$http?$current_entity )
delete c$http$current_entity;
}

View file

@ -16,7 +16,7 @@ export {
##
## Returns: A vector of strings containing the keys.
global extract_keys: function(data: string, kv_splitter: pattern): string_vec;
## Creates a URL from an :zeek:type:`HTTP::Info` record. This should
## handle edge cases such as proxied requests appropriately.
##
@ -24,7 +24,7 @@ export {
##
## Returns: A URL, not prefixed by ``"http://"``.
global build_url: function(rec: Info): string;
## Creates a URL from an :zeek:type:`HTTP::Info` record. This should
## handle edge cases such as proxied requests appropriately.
##
@ -41,7 +41,7 @@ export {
function extract_keys(data: string, kv_splitter: pattern): string_vec
{
local key_vec: vector of string = vector();
local parts = split_string(data, kv_splitter);
for ( part_index in parts )
{
@ -64,7 +64,7 @@ function build_url(rec: Info): string
host = fmt("%s:%d", host, resp_p);
return fmt("%s%s", host, uri);
}
function build_url_http(rec: Info): string
{
return fmt("http://%s", build_url(rec));

View file

@ -31,7 +31,7 @@ event zeek_init() &priority=5
event file_over_new_connection(f: fa_file, c: connection, is_orig: bool) &priority=5
{
if ( [c$id$resp_h, c$id$resp_p] !in dcc_expected_transfers )
if ( [c$id$resp_h, c$id$resp_p] !in dcc_expected_transfers )
return;
local irc = dcc_expected_transfers[c$id$resp_h, c$id$resp_p];

View file

@ -1,11 +1,11 @@
##! Implements the core IRC analysis support. The logging model is to log
##! IRC commands along with the associated response and some additional
##! IRC commands along with the associated response and some additional
##! metadata about the connection if it's available.
module IRC;
export {
redef enum Log::ID += { LOG };
global log_policy: Log::PolicyHook;
@ -21,7 +21,7 @@ export {
nick: string &log &optional;
## Username given for the connection.
user: string &log &optional;
## Command given by the client.
command: string &log &optional;
## Value for the command given by the client.
@ -29,8 +29,8 @@ export {
## Any additional data for the command.
addl: string &log &optional;
};
## Event that can be handled to access the IRC record as it is sent on
## Event that can be handled to access the IRC record as it is sent on
## to the logging framework.
global irc_log: event(rec: Info);
}
@ -48,7 +48,7 @@ event zeek_init() &priority=5
Log::create_stream(IRC::LOG, [$columns=Info, $ev=irc_log, $path="irc", $policy=log_policy]);
Analyzer::register_for_ports(Analyzer::ANALYZER_IRC, ports);
}
function new_session(c: connection): Info
{
local info: Info;
@ -57,12 +57,12 @@ function new_session(c: connection): Info
info$id = c$id;
return info;
}
function set_session(c: connection)
{
if ( ! c?$irc )
c$irc = new_session(c);
c$irc$ts=network_time();
}

View file

@ -95,7 +95,7 @@ function set_session(c: connection): bool
$id = c$id);
Conn::register_removal_hook(c, finalize_krb);
}
return c$krb$logged;
}
@ -115,7 +115,7 @@ event krb_error(c: connection, msg: Error_Msg) &priority=5
if ( msg?$error_text && msg$error_text in ignored_errors )
{
if ( c?$krb )
if ( c?$krb )
delete c$krb;
return;
@ -174,7 +174,7 @@ event krb_as_response(c: connection, msg: KDC_Response) &priority=5
if ( ! c$krb?$client && ( msg?$client_name || msg?$client_realm ) )
{
c$krb$client = fmt("%s/%s", msg?$client_name ? msg$client_name : "",
c$krb$client = fmt("%s/%s", msg?$client_name ? msg$client_name : "",
msg?$client_realm ? msg$client_realm : "");
}
@ -202,7 +202,7 @@ event krb_tgs_request(c: connection, msg: KDC_Request) &priority=5
c$krb$request_type = "TGS";
if ( msg?$service_name )
c$krb$service = msg$service_name;
if ( msg?$from )
if ( msg?$from )
c$krb$from = msg$from;
if ( msg?$till )
c$krb$till = msg$till;
@ -221,7 +221,7 @@ event krb_tgs_response(c: connection, msg: KDC_Response) &priority=5
if ( ! c$krb?$client && ( msg?$client_name || msg?$client_realm ) )
{
c$krb$client = fmt("%s/%s", msg?$client_name ? msg$client_name : "",
c$krb$client = fmt("%s/%s", msg?$client_name ? msg$client_name : "",
msg?$client_realm ? msg$client_realm : "");
}

View file

@ -33,7 +33,7 @@ export {
## Indicate whether or not the authentication was successful.
success : bool &log &optional;
## Internally used field to indicate if the login attempt
## Internally used field to indicate if the login attempt
## has already been logged.
done: bool &default=F;
};

View file

@ -24,7 +24,7 @@ export {
mac : string &log &optional;
## The address given to the network access server, if
## present. This is only a hint from the RADIUS server
## and the network access server is not required to honor
## and the network access server is not required to honor
## the address.
framed_addr : addr &log &optional;
## Address (IPv4, IPv6, or FQDN) of the initiator end of the tunnel,
@ -33,7 +33,7 @@ export {
tunnel_client: string &log &optional;
## Connect info, if present.
connect_info : string &log &optional;
## Reply message from the server challenge. This is
## Reply message from the server challenge. This is
## frequently shown to the user authenticating.
reply_msg : string &log &optional;
## Successful or failed authentication.

View file

@ -41,15 +41,15 @@ export {
desktop_width: count &log &optional;
## Desktop height of the client machine.
desktop_height: count &log &optional;
## The color depth requested by the client in
## The color depth requested by the client in
## the high_color_depth field.
requested_color_depth: string &log &optional;
## If the connection is being encrypted with native
## RDP encryption, this is the type of cert
## RDP encryption, this is the type of cert
## being used.
cert_type: string &log &optional;
## The number of certs seen. X.509 can transfer an
## The number of certs seen. X.509 can transfer an
## entire certificate chain.
cert_count: count &log &default=0;
## Indicates if the provided certificate or certificate
@ -57,7 +57,7 @@ export {
cert_permanent: bool &log &optional;
## Encryption level of the connection.
encryption_level: string &log &optional;
## Encryption method of the connection.
## Encryption method of the connection.
encryption_method: string &log &optional;
};
@ -65,7 +65,7 @@ export {
## continuing to process encrypted traffic.
option disable_analyzer_after_detection = F;
## The amount of time to monitor an RDP session from when it is first
## The amount of time to monitor an RDP session from when it is first
## identified. When this interval is reached, the session is logged.
option rdp_check_interval = 10secs;
@ -113,7 +113,7 @@ function write_log(c: connection)
info$done = T;
# Verify that the RDP session contains
# RDP data before writing it to the log.
# RDP data before writing it to the log.
if ( info?$cookie || info?$keyboard_layout || info?$result )
Log::write(RDP::LOG, info);
}
@ -124,16 +124,16 @@ event check_record(c: connection)
if ( c$rdp$done )
return;
# If the value rdp_check_interval has passed since the
# RDP session was started, then log the record.
# If the value rdp_check_interval has passed since the
# RDP session was started, then log the record.
local diff = network_time() - c$rdp$ts;
if ( diff > rdp_check_interval )
{
write_log(c);
# Remove the analyzer if it is still attached.
if ( disable_analyzer_after_detection &&
connection_exists(c$id) &&
if ( disable_analyzer_after_detection &&
connection_exists(c$id) &&
c$rdp?$analyzer_id )
{
disable_analyzer(c$id, c$rdp$analyzer_id);
@ -240,7 +240,7 @@ event rdp_server_certificate(c: connection, cert_type: count, permanently_issued
# now so we manually count this one.
if ( c$rdp$cert_type == "RSA" )
++c$rdp$cert_count;
c$rdp$cert_permanent = permanently_issued;
}

View file

@ -107,13 +107,13 @@ export {
} &redef &default=function(i: count):string { return fmt("unknown-wksta-command-%d", i); };
type rpc_cmd_table: table[count] of string;
## The subcommands for RPC endpoints.
const rpc_sub_cmds: table[string] of rpc_cmd_table = {
["4b324fc8-1670-01d3-1278-5a47bf6ee188"] = srv_cmds,
["6bffd098-a112-3610-9833-46c3f87e345a"] = wksta_cmds,
["6bffd098-a112-3610-9833-46c3f87e345a"] = wksta_cmds,
} &redef &default=function(i: string):rpc_cmd_table { return table() &default=function(j: string):string { return fmt("unknown-uuid-%s", j); }; };
}
module SMB1;
@ -195,37 +195,37 @@ export {
} &default=function(i: count):string { return fmt("unknown-%d", i); };
const trans2_sub_commands: table[count] of string = {
[0x00] = "OPEN2",
[0x01] = "FIND_FIRST2",
[0x02] = "FIND_NEXT2",
[0x03] = "QUERY_FS_INFORMATION",
[0x04] = "SET_FS_INFORMATION",
[0x05] = "QUERY_PATH_INFORMATION",
[0x06] = "SET_PATH_INFORMATION",
[0x07] = "QUERY_FILE_INFORMATION",
[0x08] = "SET_FILE_INFORMATION",
[0x09] = "FSCTL",
[0x0A] = "IOCTL",
[0x0B] = "FIND_NOTIFY_FIRST",
[0x0C] = "FIND_NOTIFY_NEXT",
[0x0D] = "CREATE_DIRECTORY",
[0x0E] = "SESSION_SETUP",
[0x10] = "GET_DFS_REFERRAL",
[0x11] = "REPORT_DFS_INCONSISTENCY",
[0x00] = "OPEN2",
[0x01] = "FIND_FIRST2",
[0x02] = "FIND_NEXT2",
[0x03] = "QUERY_FS_INFORMATION",
[0x04] = "SET_FS_INFORMATION",
[0x05] = "QUERY_PATH_INFORMATION",
[0x06] = "SET_PATH_INFORMATION",
[0x07] = "QUERY_FILE_INFORMATION",
[0x08] = "SET_FILE_INFORMATION",
[0x09] = "FSCTL",
[0x0A] = "IOCTL",
[0x0B] = "FIND_NOTIFY_FIRST",
[0x0C] = "FIND_NOTIFY_NEXT",
[0x0D] = "CREATE_DIRECTORY",
[0x0E] = "SESSION_SETUP",
[0x10] = "GET_DFS_REFERRAL",
[0x11] = "REPORT_DFS_INCONSISTENCY",
} &default=function(i: count):string { return fmt("unknown-trans2-sub-cmd-%d", i); };
const trans_sub_commands: table[count] of string = {
[0x01] = "SET_NMPIPE_STATE",
[0x11] = "RAW_READ_NMPIPE",
[0x21] = "QUERY_NMPIPE_STATE",
[0x22] = "QUERY_NMPIPE_INFO",
[0x23] = "PEEK_NMPIPE",
[0x26] = "TRANSACT_NMPIPE",
[0x31] = "RAW_WRITE_NMPIPE",
[0x36] = "READ_NMPIPE",
[0x37] = "WRITE_NMPIPE",
[0x53] = "WAIT_NMPIPE",
[0x54] = "CALL_NMPIPE",
[0x01] = "SET_NMPIPE_STATE",
[0x11] = "RAW_READ_NMPIPE",
[0x21] = "QUERY_NMPIPE_STATE",
[0x22] = "QUERY_NMPIPE_INFO",
[0x23] = "PEEK_NMPIPE",
[0x26] = "TRANSACT_NMPIPE",
[0x31] = "RAW_WRITE_NMPIPE",
[0x36] = "READ_NMPIPE",
[0x37] = "WRITE_NMPIPE",
[0x53] = "WAIT_NMPIPE",
[0x54] = "CALL_NMPIPE",
} &default=function(i: count):string { return fmt("unknown-trans-sub-cmd-%d", i); };
}

View file

@ -14,7 +14,7 @@ export {
function get_file_handle(c: connection, is_orig: bool): string
{
if ( ! (c$smb_state?$current_file &&
(c$smb_state$current_file?$name ||
(c$smb_state$current_file?$name ||
c$smb_state$current_file?$path)) )
{
# TODO - figure out what are the cases where this happens.

View file

@ -5,7 +5,7 @@
module SMB;
export {
redef enum Log::ID += {
redef enum Log::ID += {
AUTH_LOG,
MAPPING_LOG,
FILES_LOG
@ -13,7 +13,7 @@ export {
global log_policy_files: Log::PolicyHook;
global log_policy_mapping: Log::PolicyHook;
## Abstracted actions for SMB file actions.
type Action: enum {
FILE_READ,
@ -55,7 +55,7 @@ export {
id : conn_id &log;
## Unique ID of the file.
fuid : string &log &optional;
## Action this log record represents.
action : Action &log &optional;
## Path pulled from the tree this file was transferred to or from.
@ -99,14 +99,14 @@ export {
uid : string &log;
## ID of the connection the request was sent over.
id : conn_id &log;
## The command sent by the client.
command : string &log;
## The subcommand sent by the client, if present.
sub_command : string &log &optional;
## Command argument sent by the client, if any.
argument : string &log &optional;
## Server reply to the client's command.
status : string &log &optional;
## Round trip time from the request to the response.
@ -116,13 +116,13 @@ export {
## Authenticated username, if available.
username : string &log &optional;
## If this is related to a tree, this is the tree
## that was used for the current command.
tree : string &log &optional;
## The type of tree (disk share, printer share, named pipe, etc.).
tree_service : string &log &optional;
## If the command referenced a file, store it here.
referenced_file : FileInfo &log &optional;
## If the command referenced a tree, store it here.
@ -138,7 +138,7 @@ export {
current_file : FileInfo &optional;
## A reference to the current tree.
current_tree : TreeInfo &optional;
## Indexed on MID to map responses to requests.
pending_cmds : table[count] of CmdInfo &optional;
## File map to retrieve file information based on the file ID.
@ -161,7 +161,7 @@ export {
redef record connection += {
smb_state : State &optional;
};
## This is an internally used function.
const set_current_file: function(smb_state: State, file_id: count) &redef;
@ -195,7 +195,7 @@ function set_current_file(smb_state: State, file_id: count)
smb_state$fid_map[file_id] = smb_state$current_cmd$referenced_file;
smb_state$fid_map[file_id]$fid = file_id;
}
smb_state$current_cmd$referenced_file = smb_state$fid_map[file_id];
smb_state$current_file = smb_state$current_cmd$referenced_file;
}
@ -203,7 +203,7 @@ function set_current_file(smb_state: State, file_id: count)
function write_file_log(state: State)
{
local f = state$current_file;
if ( f?$name &&
if ( f?$name &&
f$action in logged_file_actions )
{
# Everything in this if statement is to avoid overlogging
@ -225,7 +225,7 @@ function write_file_log(state: State)
else
add state$recent_files[file_ident];
}
Log::write(FILES_LOG, f);
}
}
@ -240,7 +240,7 @@ event file_state_remove(f: fa_file) &priority=-5
{
if ( f$source != "SMB" )
return;
for ( id, c in f$conns )
{
if ( c?$smb_state && c$smb_state?$current_file)

View file

@ -39,12 +39,12 @@ event smb1_message(c: connection, hdr: SMB1::Header, is_orig: bool) &priority=5
{
smb_state$current_cmd$tree = smb_state$current_tree$path;
}
if ( smb_state$current_tree?$service )
{
smb_state$current_cmd$tree_service = smb_state$current_tree$service;
}
if ( mid !in smb_state$pending_cmds )
{
local tmp_cmd = SMB::CmdInfo($uid=c$uid, $id=c$id, $version="SMB1", $command = SMB1::commands[hdr$command]);
@ -52,10 +52,10 @@ event smb1_message(c: connection, hdr: SMB1::Header, is_orig: bool) &priority=5
local tmp_file = SMB::FileInfo($uid=c$uid, $id=c$id);
tmp_cmd$referenced_file = tmp_file;
tmp_cmd$referenced_tree = smb_state$current_tree;
smb_state$pending_cmds[mid] = tmp_cmd;
}
smb_state$current_cmd = smb_state$pending_cmds[mid];
if ( !is_orig )
@ -97,11 +97,11 @@ event smb1_negotiate_response(c: connection, hdr: SMB1::Header, response: SMB1::
delete c$smb_state$current_cmd$smb1_offered_dialects;
}
}
event smb1_negotiate_response(c: connection, hdr: SMB1::Header, response: SMB1::NegotiateResponse) &priority=-5
{
}
event smb1_tree_connect_andx_request(c: connection, hdr: SMB1::Header, path: string, service: string) &priority=5
{
local tmp_tree = SMB::TreeInfo($uid=c$uid, $id=c$id, $path=path, $service=service);
@ -117,7 +117,7 @@ event smb1_tree_connect_andx_response(c: connection, hdr: SMB1::Header, service:
c$smb_state$current_cmd$referenced_tree$share_type = "PIPE";
c$smb_state$current_cmd$tree_service = service;
if ( native_file_system != "" )
c$smb_state$current_cmd$referenced_tree$native_file_system = native_file_system;
@ -150,13 +150,13 @@ event smb1_nt_create_andx_response(c: connection, hdr: SMB1::Header, file_id: co
# I'm seeing negative data from IPC tree transfers
if ( time_to_double(times$modified) > 0.0 )
c$smb_state$current_cmd$referenced_file$times = times;
# We can identify the file by its file id now so let's stick it
# We can identify the file by its file id now so let's stick it
# in the file map.
c$smb_state$fid_map[file_id] = c$smb_state$current_cmd$referenced_file;
c$smb_state$current_file = c$smb_state$fid_map[file_id];
SMB::write_file_log(c$smb_state);
}
@ -167,7 +167,7 @@ event smb1_read_andx_request(c: connection, hdr: SMB1::Header, file_id: count, o
if ( c$smb_state$current_file?$name )
c$smb_state$current_cmd$argument = c$smb_state$current_file$name;
}
event smb1_read_andx_request(c: connection, hdr: SMB1::Header, file_id: count, offset: count, length: count) &priority=-5
{
if ( c$smb_state$current_tree?$path && !c$smb_state$current_file?$path )
@ -180,12 +180,12 @@ event smb1_write_andx_request(c: connection, hdr: SMB1::Header, file_id: count,
{
SMB::set_current_file(c$smb_state, file_id);
c$smb_state$current_file$action = SMB::FILE_WRITE;
if ( !c$smb_state$current_cmd?$argument &&
if ( !c$smb_state$current_cmd?$argument &&
# TODO: figure out why name isn't getting set sometimes.
c$smb_state$current_file?$name )
c$smb_state$current_cmd$argument = c$smb_state$current_file$name;
}
event smb1_write_andx_request(c: connection, hdr: SMB1::Header, file_id: count, offset: count, data_len: count) &priority=-5
{
if ( c$smb_state$current_tree?$path && !c$smb_state$current_file?$path )
@ -217,7 +217,7 @@ event smb1_close_request(c: connection, hdr: SMB1::Header, file_id: count) &prio
if ( fl?$name )
c$smb_state$current_cmd$argument = fl$name;
delete c$smb_state$fid_map[file_id];
SMB::write_file_log(c$smb_state);
@ -254,7 +254,7 @@ event smb1_session_setup_andx_response(c: connection, hdr: SMB1::Header, respons
{
# No behavior yet.
}
event smb1_transaction_request(c: connection, hdr: SMB1::Header, name: string, sub_cmd: count, parameters: string, data: string)
{
c$smb_state$current_cmd$sub_command = SMB1::trans_sub_commands[sub_cmd];
@ -267,7 +267,7 @@ event smb1_write_andx_request(c: connection, hdr: SMB1::Header, file_id: count,
# TODO: figure out why the uuid isn't getting set sometimes.
return;
}
c$smb_state$pipe_map[file_id] = c$smb_state$current_file$uuid;
}
@ -278,11 +278,11 @@ event smb_pipe_bind_ack_response(c: connection, hdr: SMB1::Header)
# TODO: figure out why the uuid isn't getting set sometimes.
return;
}
c$smb_state$current_cmd$sub_command = "RPC_BIND_ACK";
c$smb_state$current_cmd$argument = SMB::rpc_uuids[c$smb_state$current_file$uuid];
}
event smb_pipe_bind_request(c: connection, hdr: SMB1::Header, uuid: string, version: string)
{
if ( ! c$smb_state?$current_file || ! c$smb_state$current_file?$uuid )

View file

@ -19,7 +19,7 @@ event smb2_message(c: connection, hdr: SMB2::Header, is_orig: bool) &priority=5
state$pipe_map = table();
c$smb_state = state;
}
local smb_state = c$smb_state;
local tid = hdr$tree_id;
local mid = hdr$message_id;
@ -159,10 +159,10 @@ event smb2_create_response(c: connection, hdr: SMB2::Header, response: SMB2::Cre
if ( time_to_double(response$times$modified) > 0.0 )
c$smb_state$current_file$times = response$times;
# We can identify the file by its file id now so let's stick it
# We can identify the file by its file id now so let's stick it
# in the file map.
c$smb_state$fid_map[response$file_id$persistent+response$file_id$volatile] = c$smb_state$current_file;
c$smb_state$current_file = c$smb_state$fid_map[response$file_id$persistent+response$file_id$volatile];
}
@ -193,7 +193,7 @@ event smb2_read_request(c: connection, hdr: SMB2::Header, file_id: SMB2::GUID, o
}
event smb2_read_request(c: connection, hdr: SMB2::Header, file_id: SMB2::GUID, offset: count, length: count) &priority=-5
{
{
SMB::write_file_log(c$smb_state);
}
@ -249,7 +249,7 @@ event smb2_file_rename(c: connection, hdr: SMB2::Header, file_id: SMB2::GUID, ds
if ( c$smb_state$current_file?$name )
c$smb_state$current_file$prev_name = c$smb_state$current_file$name;
c$smb_state$current_file$name = dst_filename;
switch ( c$smb_state$current_tree$share_type )

View file

@ -31,7 +31,7 @@ export {
[23] = "LOCAL7",
[999] = "UNSPECIFIED",
} &default=function(c: count): string { return fmt("?-%d", c); };
## Mapping between the constants and string values for syslog severities.
const severity_codes: table[count] of string = {
[0] = "EMERG",

View file

@ -1,4 +1,4 @@
##! Core script support for logging syslog messages. This script represents
##! Core script support for logging syslog messages. This script represents
##! one syslog message as one logged record.
@load ./consts
@ -52,7 +52,7 @@ event syslog_message(c: connection, facility: count, severity: count, msg: strin
info$facility=facility_codes[facility];
info$severity=severity_codes[severity];
info$message=msg;
c$syslog = info;
}

View file

@ -3,16 +3,16 @@
module GLOBAL;
export {
## Takes a conn_id record and returns a string representation with the
## Takes a conn_id record and returns a string representation with the
## general data flow appearing to be from the connection originator
## on the left to the responder on the right.
global id_string: function(id: conn_id): string;
## Takes a conn_id record and returns a string representation with the
## Takes a conn_id record and returns a string representation with the
## general data flow appearing to be from the connection responder
## on the right to the originator on the left.
global reverse_id_string: function(id: conn_id): string;
## Calls :zeek:id:`id_string` or :zeek:id:`reverse_id_string` if the
## second argument is T or F, respectively.
global directed_id_string: function(id: conn_id, is_orig: bool): string;

View file

@ -58,7 +58,7 @@ type Host: enum {
function addr_matches_host(ip: addr, h: Host): bool
{
if ( h == NO_HOSTS ) return F;
return ( h == ALL_HOSTS ||
(h == LOCAL_HOSTS && Site::is_local_addr(ip)) ||
(h == REMOTE_HOSTS && !Site::is_local_addr(ip)) );

View file

@ -1,8 +1,7 @@
## Extract an integer from a string.
##
##
## s: The string to search for a number.
##
##
## get_first: Provide `F` if you would like the last number found.
##
## Returns: The request integer from the given string or 0 if

View file

@ -27,7 +27,7 @@ function set_to_regex(ss: set[string], pat: string): pattern
for ( s in ss )
{
local tmp_pattern = convert_for_pattern(s);
return_pat = ( i == 0 ) ?
return_pat = ( i == 0 ) ?
tmp_pattern : cat(tmp_pattern, "|", return_pat);
++i;
}

View file

@ -25,7 +25,7 @@ function join_string_set(ss: set[string], j: string): string
{
if ( i > 0 )
output = cat(output, j);
output = cat(output, s);
++i;
}

View file

@ -16,13 +16,13 @@ export {
## for.
index: count &default=0;
};
## The thresholds you would like to use as defaults with the
## The thresholds you would like to use as defaults with the
## :zeek:id:`default_check_threshold` function.
const default_notice_thresholds: vector of count = {
30, 100, 1000, 10000, 100000, 1000000, 10000000,
} &redef;
## This will check if a :zeek:type:`TrackCount` variable has crossed any
## thresholds in a given set.
##
@ -33,7 +33,7 @@ export {
##
## Returns: T if a threshold has been crossed, else F.
global check_threshold: function(v: vector of count, tracker: TrackCount): bool;
## This will use the :zeek:id:`default_notice_thresholds` variable to
## check a :zeek:type:`TrackCount` variable to see if it has crossed
## another threshold.

View file

@ -1,4 +1,3 @@
@load base/utils/dir
@load base/utils/paths
@ -255,7 +254,7 @@ event file_new(f: fa_file)
if ( |parts| == 3 )
file_dir = parts[0];
if ( (watch_file != "" && f$source == watch_file) ||
if ( (watch_file != "" && f$source == watch_file) ||
(watch_dir != "" && compress_path(watch_dir) == file_dir) )
{
Files::add_analyzer(f, Files::ANALYZER_UNIFIED2);

View file

@ -41,7 +41,7 @@ event Control::net_stats_response(s: string) &priority=-10
{
event terminate_event();
}
event Control::configuration_update_response() &priority=-10
{
event terminate_event();
@ -68,7 +68,7 @@ function configurable_ids(): id_table
# We don't want to update non-const globals because that's usually
# where state is stored and those values will frequently be declared
# with &redef so that attributes can be redefined.
#
#
# NOTE: functions are currently not fully supported for serialization and hence
# aren't sent.
if ( t$constant && t$redefinable && t$type_name != "func" )

View file

@ -24,6 +24,6 @@ event protocol_violation(c: connection, atype: Analyzer::Tag, aid: count,
reason: string) &priority=4
{
if ( ! c?$dpd ) return;
c$dpd$packet_segment=fmt("%s", sub_bytes(get_current_packet()$data, 0, packet_segment_size));
}

View file

@ -66,7 +66,7 @@ function do_mhr_lookup(hash: string, fi: Notice::FileInfo)
event file_hash(f: fa_file, kind: string, hash: string)
{
if ( kind == "sha1" && f?$info && f$info?$mime_type &&
if ( kind == "sha1" && f?$info && f$info?$mime_type &&
match_file_types in f$info$mime_type )
do_mhr_lookup(hash, Notice::create_file_info(f));
}

View file

@ -1,10 +1,9 @@
module Files;
export {
redef record Files::Info += {
## The information density of the contents of the file,
## expressed as a number of bits per character.
## The information density of the contents of the file,
## expressed as a number of bits per character.
entropy: double &log &optional;
};
}

View file

@ -7,6 +7,6 @@ event file_hash(f: fa_file, kind: string, hash: string)
$indicator_type=Intel::FILE_HASH,
$f=f,
$where=Files::IN_HASH);
Intel::seen(seen);
}

View file

@ -22,9 +22,8 @@ hook Intel::extend_match(info: Info, s: Seen, items: set[Item]) &priority=9
break;
}
}
if ( whitelisted )
# Prevent logging
break;
}

View file

@ -8,14 +8,14 @@
module Software;
export {
redef enum Notice::Type += {
redef enum Notice::Type += {
## For certain software, a version changing may matter. In that
## case, this notice will be generated. Software that matters
## if the version changes can be configured with the
## :zeek:id:`Software::interesting_version_changes` variable.
Software_Version_Change,
};
## Some software is more interesting when the version changes and this
## is a set of all software that should raise a notice when a different
## version is seen on a host.

View file

@ -8,7 +8,7 @@ module Barnyard2;
export {
redef enum Log::ID += { LOG };
global log_policy: Log::PolicyHook;
type Info: record {
@ -19,9 +19,9 @@ export {
## Associated alert data.
alert: AlertData &log;
};
## This can convert a Barnyard :zeek:type:`Barnyard2::PacketID` value to
## a :zeek:type:`conn_id` value in the case that you might need to index
## a :zeek:type:`conn_id` value in the case that you might need to index
## into an existing data structure elsewhere within Zeek.
global pid2cid: function(p: PacketID): conn_id;
}
@ -40,22 +40,22 @@ function pid2cid(p: PacketID): conn_id
event barnyard_alert(id: PacketID, alert: AlertData, msg: string, data: string)
{
Log::write(Barnyard2::LOG, [$ts=network_time(), $pid=id, $alert=alert]);
#local proto_connection_string: string;
#if ( id$src_p == 0/tcp )
# proto_connection_string = fmt("{PROTO:255} %s -> %s", id$src_ip, id$dst_ip);
#else
# proto_connection_string = fmt("{%s} %s:%d -> %s:%d",
# proto_connection_string = fmt("{%s} %s:%d -> %s:%d",
# to_upper(fmt("%s", get_port_transport_proto(id$dst_p))),
# id$src_ip, id$src_p, id$dst_ip, id$dst_p);
#
#local snort_alike_msg = fmt("%.6f [**] [%d:%d:%d] %s [**] [Classification: %s] [Priority: %d] %s",
#local snort_alike_msg = fmt("%.6f [**] [%d:%d:%d] %s [**] [Classification: %s] [Priority: %d] %s",
# sad$ts,
# sad$generator_id,
# sad$signature_id,
# sad$signature_revision,
# msg,
# sad$classification,
# sad$priority_id,
# msg,
# sad$classification,
# sad$priority_id,
# proto_connection_string);
}

View file

@ -23,7 +23,7 @@ export {
dst_p: port;
} &log;
## This is the event that Barnyard2 instances will send if they're
## This is the event that Barnyard2 instances will send if they're
## configured with the bro_alert output plugin.
global barnyard_alert: event(id: Barnyard2::PacketID,
alert: Barnyard2::AlertData,

View file

@ -6,7 +6,7 @@ module TrimTraceFile;
export {
## The interval between times that the output tracefile is rotated.
const trim_interval = 10 mins &redef;
## This event can be generated externally to this script if on-demand
## tracefile rotation is required with the caveat that the script
## doesn't currently attempt to get back on schedule automatically and
@ -19,14 +19,14 @@ event TrimTraceFile::go(first_trim: bool)
{
if ( zeek_is_terminating() || trace_output_file == "" )
return;
if ( ! first_trim )
{
local info = rotate_file_by_name(trace_output_file);
if ( info$old_name != "" )
system(fmt("/bin/rm %s", safe_shell_quote(info$new_name)));
}
schedule trim_interval { TrimTraceFile::go(F) };
}
@ -35,4 +35,3 @@ event zeek_init()
if ( trim_interval > 0 secs )
schedule trim_interval { TrimTraceFile::go(T) };
}

View file

@ -1,5 +1,5 @@
##! This script logs hosts that Zeek determines have performed complete TCP
##! handshakes and logs the address once per day (by default). The log that
##! handshakes and logs the address once per day (by default). The log that
##! is output provides an easy way to determine a count of the IP addresses in
##! use on a network per day.
@ -29,11 +29,11 @@ export {
## with keys uniformly distributed over proxy nodes in cluster
## operation.
const use_host_store = T &redef;
## The hosts whose existence should be logged and tracked.
## See :zeek:type:`Host` for possible choices.
option host_tracking = LOCAL_HOSTS;
## Holds the set of all known hosts. Keys in the store are addresses
## and their associated value will always be the "true" boolean.
global host_store: Cluster::StoreInfo;
@ -49,8 +49,8 @@ export {
## :zeek:see:`Known::host_store`.
option host_store_timeout = 15sec;
## The set of all known addresses to store for preventing duplicate
## logging of addresses. It can also be used from other scripts to
## The set of all known addresses to store for preventing duplicate
## logging of addresses. It can also be used from other scripts to
## inspect if an address has been seen in use.
## Maintain the list of known hosts for 24 hours so that the existence
## of each individual address is logged each day.

View file

@ -84,7 +84,7 @@ export {
}
redef record connection += {
# This field is to indicate whether or not the processing for detecting
# This field is to indicate whether or not the processing for detecting
# and logging the service for this connection is complete.
known_services_done: bool &default=F;
};
@ -314,4 +314,3 @@ event zeek_init() &priority=5
$path="known_services",
$policy=log_policy_services]);
}

View file

@ -1,6 +1,6 @@
##! This script detects names which are not within zones considered to be
##! local but resolving to addresses considered local.
##! The :zeek:id:`Site::local_zones` variable **must** be set appropriately for
##! local but resolving to addresses considered local.
##! The :zeek:id:`Site::local_zones` variable **must** be set appropriately for
##! this detection.
@load base/frameworks/notice
@ -9,7 +9,7 @@
module DNS;
export {
redef enum Notice::Type += {
redef enum Notice::Type += {
## Raised when a non-local name is found to be pointing at a
## local host. The :zeek:id:`Site::local_zones` variable
## **must** be set appropriately for this detection.
@ -21,7 +21,7 @@ event dns_A_reply(c: connection, msg: dns_msg, ans: dns_answer, a: addr) &priori
{
if ( |Site::local_zones| == 0 )
return;
# Check for responses from remote hosts that point at local hosts
# but the name is not considered to be within a "local" zone.
if ( Site::is_local_addr(a) && # referring to a local host
@ -29,7 +29,7 @@ event dns_A_reply(c: connection, msg: dns_msg, ans: dns_answer, a: addr) &priori
{
NOTICE([$note=External_Name,
$msg=fmt("%s is pointing to a local host - %s.", ans$query, a),
$conn=c,
$conn=c,
$identifier=cat(a,ans$query)]);
}
}

View file

@ -7,7 +7,7 @@ module FTP;
export {
redef enum Notice::Type += {
## Indicates that a successful response to a "SITE EXEC"
## Indicates that a successful response to a "SITE EXEC"
## command/arg pair was seen.
Site_Exec_Success,
};
@ -16,10 +16,10 @@ export {
event ftp_reply(c: connection, code: count, msg: string, cont_resp: bool) &priority=3
{
local response_xyz = parse_ftp_reply_code(code);
# If a successful SITE EXEC command is executed, raise a notice.
if ( response_xyz$x == 2 &&
c$ftp$cmdarg$cmd == "SITE" &&
c$ftp$cmdarg$cmd == "SITE" &&
/[Ee][Xx][Ee][Cc]/ in c$ftp$cmdarg$arg )
{
NOTICE([$note=Site_Exec_Success, $conn=c,

View file

@ -26,7 +26,7 @@ export {
event signature_match(state: signature_state, msg: string, data: string) &priority=5
{
if ( /^webapp-/ !in state$sig_id ) return;
local c = state$conn;
local si: Software::Info;
si = [$name=msg, $unparsed_version=msg, $host=c$id$resp_h, $host_p=c$id$resp_p, $software_type=WEB_APPLICATION];

View file

@ -11,15 +11,15 @@ export {
## The vector of HTTP header names sent by the client. No
## header values are included here, just the header names.
client_header_names: vector of string &log &optional;
## The vector of HTTP header names sent by the server. No
## header values are included here, just the header names.
server_header_names: vector of string &log &optional;
};
## A boolean value to determine if client header names are to be logged.
option log_client_header_names = T;
## A boolean value to determine if server header names are to be logged.
option log_server_header_names = F;
}

View file

@ -1,4 +1,4 @@
##! Extracts and logs variables from the requested URI in the default HTTP
##! Extracts and logs variables from the requested URI in the default HTTP
##! logging stream.
@load base/protocols/http

View file

@ -82,10 +82,10 @@ event modbus_read_holding_registers_response(c: connection, headers: ModbusHeade
if ( slave_regs[c$modbus$track_address]$value != registers[i] )
{
local delta = network_time() - slave_regs[c$modbus$track_address]$last_set;
event Modbus::changed_register(c, c$modbus$track_address,
event Modbus::changed_register(c, c$modbus$track_address,
slave_regs[c$modbus$track_address]$value, registers[i],
delta);
slave_regs[c$modbus$track_address]$last_set = network_time();
slave_regs[c$modbus$track_address]$value = registers[i];
}
@ -102,7 +102,7 @@ event modbus_read_holding_registers_response(c: connection, headers: ModbusHeade
event Modbus::changed_register(c: connection, register: count, old_val: count, new_val: count, delta: interval)
{
local rec: MemmapInfo = [$ts=network_time(), $uid=c$uid, $id=c$id,
local rec: MemmapInfo = [$ts=network_time(), $uid=c$uid, $id=c$id,
$register=register, $old_val=old_val, $new_val=new_val, $delta=delta];
Log::write(REGISTER_CHANGE_LOG, rec);
}

View file

@ -39,7 +39,7 @@ event smb1_message(c: connection, hdr: SMB1::Header, is_orig: bool) &priority=-5
if ( c$smb_state$current_cmd$status in SMB::ignored_command_statuses )
return;
if ( c$smb_state$current_cmd$command in SMB::deferred_logging_cmds )
return;

View file

@ -6,7 +6,7 @@
module SMTP;
export {
redef enum Notice::Type += {
redef enum Notice::Type += {
## An SMTP server sent a reply mentioning an SMTP block list.
Blocklist_Error_Message,
## The originator's address is seen in the block list error message.
@ -21,19 +21,19 @@ export {
/spamhaus\.org\//
| /sophos\.com\/security\//
| /spamcop\.net\/bl/
| /cbl\.abuseat\.org\//
| /sorbs\.net\//
| /cbl\.abuseat\.org\//
| /sorbs\.net\//
| /bsn\.borderware\.com\//
| /mail-abuse\.com\//
| /b\.barracudacentral\.com\//
| /psbl\.surriel\.com\//
| /antispam\.imp\.ch\//
| /psbl\.surriel\.com\//
| /antispam\.imp\.ch\//
| /dyndns\.com\/.*spam/
| /rbl\.knology\.net\//
| /intercept\.datapacket\.net\//
| /uceprotect\.net\//
| /hostkarma\.junkemailfilter\.com\//;
}
event smtp_reply(c: connection, is_orig: bool, code: count, cmd: string,
@ -55,8 +55,8 @@ event smtp_reply(c: connection, is_orig: bool, code: count, cmd: string,
note = Blocklist_Blocked_Host;
message = fmt("%s is on an SMTP block list", c$id$orig_h);
}
NOTICE([$note=note, $conn=c, $msg=message, $sub=msg,
NOTICE([$note=note, $conn=c, $msg=message, $sub=msg,
$identifier=cat(c$id$orig_h)]);
}
}

View file

@ -24,7 +24,7 @@ event log_smtp(rec: Info)
{
ip = rec$x_originating_ip;
loc = lookup_location(ip);
if ( (loc?$country_code &&
loc$country_code in suspicious_origination_countries) ||
ip in suspicious_origination_networks )

View file

@ -1,10 +1,10 @@
##! This script feeds software detected through email into the software
##! framework. Mail clients and webmail interfaces are the only thing
##! framework. Mail clients and webmail interfaces are the only thing
##! currently detected.
##!
##!
##! TODO:
##!
##! * Find some heuristic to determine if email was sent through
##! * Find some heuristic to determine if email was sent through
##! a MS Exchange webmail interface as opposed to a desktop client.
@load base/frameworks/software/main
@ -18,13 +18,13 @@ export {
MAIL_SERVER,
WEBMAIL_SERVER
};
redef record Info += {
## Boolean indicator of if the message was sent through a
## webmail interface.
is_webmail: bool &log &default=F;
};
## Assuming that local mail servers are more trustworthy with the
## headers they insert into message envelopes, this default makes Zeek
## not attempt to detect software in inbound message bodies. If mail
@ -34,15 +34,15 @@ export {
## incoming messages (network traffic originating from a non-local
## address), set this variable to EXTERNAL_HOSTS or ALL_HOSTS.
option detect_clients_in_messages_from = LOCAL_HOSTS;
## A regular expression to match USER-AGENT-like headers to find if a
## A regular expression to match USER-AGENT-like headers to find if a
## message was sent with a webmail interface.
option webmail_user_agents =
/^iPlanet Messenger/
/^iPlanet Messenger/
| /^Sun Java\(tm\) System Messenger Express/
| /\(IMP\)/ # Horde Internet Messaging Program
| /^SquirrelMail/
| /^NeoMail/
| /^NeoMail/
| /ZimbraWebClient/;
}
@ -66,12 +66,12 @@ event log_smtp(rec: Info)
{
s_type = WEBMAIL_SERVER;
# If the earliest received header indicates that the connection
# was via HTTP, then that likely means the actual mail software
# was via HTTP, then that likely means the actual mail software
# is installed on the second address in the path.
if ( rec?$first_received && /via HTTP/ in rec$first_received )
client_ip = rec$path[|rec$path|-2];
}
if ( addr_matches_host(rec$id$orig_h,
detect_clients_in_messages_from) )
{
@ -79,4 +79,3 @@ event log_smtp(rec: Info)
}
}
}

View file

@ -1,7 +1,7 @@
##! This script will generate a notice if an apparent SSH login originates
##! or heads to a host with a reverse hostname that looks suspicious. By
##! default, the regular expression to match "interesting" hostnames includes
##! names that are typically used for infrastructure hosts like nameservers,
##! This script will generate a notice if an apparent SSH login originates
##! or heads to a host with a reverse hostname that looks suspicious. By
##! default, the regular expression to match "interesting" hostnames includes
##! names that are typically used for infrastructure hosts like nameservers,
##! mail servers, web servers and ftp servers.
@load base/frameworks/notice
@ -15,7 +15,7 @@ export {
## :zeek:id:`SSH::interesting_hostnames` regular expression.
Interesting_Hostname_Login,
};
## Strange/bad host names to see successful SSH logins from or to.
option interesting_hostnames =
/^d?ns[0-9]*\./ |
@ -49,4 +49,3 @@ event ssh_auth_successful(c: connection, auth_method_none: bool)
check_ssh_hostname(c$id, c$uid, host);
}
}

View file

@ -1,4 +1,4 @@
##! Extracts SSH client and server information from SSH
##! Extracts SSH client and server information from SSH
##! connections and forwards it to the software framework.
@load base/frameworks/software

View file

@ -1,4 +1,4 @@
##! Generate notices when X.509 certificates over SSL/TLS are expired or
##! Generate notices when X.509 certificates over SSL/TLS are expired or
##! going to expire soon based on the date and time values stored within the
##! certificate.

View file

@ -12,13 +12,13 @@ export {
redef enum Log::ID += { CERTS_LOG };
global log_policy_certs: Log::PolicyHook;
type CertsInfo: record {
## The timestamp when the certificate was detected.
ts: time &log;
## The address that offered the certificate.
host: addr &log;
## If the certificate was handed out by a server, this is the
## If the certificate was handed out by a server, this is the
## port that the server was listening on.
port_num: port &log &optional;
## Certificate subject.
@ -28,7 +28,7 @@ export {
## Serial number for the certificate.
serial: string &log &optional;
};
## The certificates whose existence should be logged and tracked.
## Choices are: LOCAL_HOSTS, REMOTE_HOSTS, ALL_HOSTS, NO_HOSTS.
option cert_tracking = LOCAL_HOSTS;
@ -38,7 +38,7 @@ export {
## with keys uniformly distributed over proxy nodes in cluster
## operation.
const use_cert_store = T &redef;
type AddrCertHashPair: record {
host: addr;
hash: string;
@ -60,15 +60,15 @@ export {
## :zeek:see:`Known::cert_store`.
option cert_store_timeout = 15sec;
## The set of all known certificates to store for preventing duplicate
## logging. It can also be used from other scripts to
## inspect if a certificate has been seen in use. The string value
## The set of all known certificates to store for preventing duplicate
## logging. It can also be used from other scripts to
## inspect if a certificate has been seen in use. The string value
## in the set is for storing the DER formatted certificate' SHA1 hash.
##
## In cluster operation, this set is uniformly distributed across
## proxy nodes.
global certs: set[addr, string] &create_expire=1day &redef;
## Event that can be handled to access the loggable record as it is sent
## on to the logging framework.
global log_known_certs: event(rec: CertsInfo);

View file

@ -1,2 +1,2 @@
##! This loads the default tuning
##! This loads the default tuning
@load ./defaults

View file

@ -1,7 +1,7 @@
# Capture TCP fragments, but not UDP (or ICMP), since those are a lot more
# common due to high-volume, fragmenting protocols such as NFS :-(.
# This normally isn't used because of the default open packet filter
# This normally isn't used because of the default open packet filter
# but we set it anyway in case the user is using a packet filter.
# Note: This was removed because the default model now is to have a wide
# open packet filter.

View file

@ -1,5 +1,5 @@
##! This file is meant to print messages on stdout for settings that would be
##! good to set in most cases or other things that could be done to achieve
##! good to set in most cases or other things that could be done to achieve
##! better detection.
@load base/utils/site