commit 42b2d56279

Merge remote-tracking branch 'origin/master' into topic/seth/files-tracking

Conflicts:
	scripts/base/frameworks/files/main.bro
	src/file_analysis/File.cc
	testing/btest/Baseline/scripts.base.frameworks.file-analysis.actions.data_event/out

486 changed files with 106378 additions and 85985 deletions

@@ -56,7 +56,7 @@ export {
		## local file path which was read, or some other input source.
		source: string &log &optional;

		## A value to represent the depth of this file in relation
		## to its source. In SMTP, it is the depth of the MIME
		## attachment on the message. In HTTP, it is the depth of the
		## request within the TCP connection.

@@ -73,7 +73,7 @@ export {
		mime_type: string &log &optional;

		## A filename for the file if one is available from the source
		## for the file. These will frequently come from
		## "Content-Disposition" headers in network protocols.
		filename: string &log &optional;

@@ -177,10 +177,19 @@ export {
	## Returns: true if the analyzer will be added, or false if analysis
	##          for the file isn't currently active or the *args*
	##          were invalid for the analyzer type.
	global add_analyzer: function(f: fa_file,
	                              tag: Files::Tag,
	                              args: AnalyzerArgs &default=AnalyzerArgs()): bool;

	## Adds all analyzers associated with a given MIME type to the analysis of
	## a file. Note that analyzers added via MIME types cannot take further
	## arguments.
	##
	## f: the file.
	##
	## mtype: the MIME type; it will be compared case-insensitively.
	global add_analyzers_for_mime_type: function(f: fa_file, mtype: string);

	## Removes an analyzer from the analysis of a given file.
	##
	## f: the file.

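A minimal sketch of driving this from script-land (assuming the stock MD5 file analyzer tag, Files::ANALYZER_MD5, is available):

event file_new(f: fa_file)
	{
	# Hash every file the core reports; per the docs above, F means
	# analysis was no longer active or the args were invalid.
	if ( ! Files::add_analyzer(f, Files::ANALYZER_MD5) )
		Reporter::warning(fmt("could not add MD5 analyzer to %s", f$id));
	}
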
@@ -224,7 +233,7 @@ export {
		## A callback to generate a file handle on demand when
		## one is needed by the core.
		get_file_handle: function(c: connection, is_orig: bool): string;

		## A callback to "describe" a file. In the case of an HTTP
		## transfer, the most obvious description would be the URL.
		## It's like an extremely compressed version of the normal log.

@@ -235,7 +244,7 @@ export {
	## Register callbacks for protocols that work with the Files framework.
	## The callbacks must uniquely identify a file and each protocol can
	## only have a single callback registered for it.
	##
	## tag: Tag for the protocol analyzer having a callback being registered.
	##
	## reg: A :bro:see:`Files::ProtoRegistration` record.

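A hedged sketch of such a registration (the handle scheme below is invented, and it assumes the describe callback of ProtoRegistration has a default):

function sketch_get_file_handle(c: connection, is_orig: bool): string
	{
	# Hypothetical handle: unique per connection and direction.
	return cat(Analyzer::ANALYZER_FTP_DATA, c$start_time, c$id, is_orig);
	}

event bro_init() &priority=5
	{
	Files::register_protocol(Analyzer::ANALYZER_FTP_DATA,
	                         [$get_file_handle = sketch_get_file_handle]);
	}
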
@@ -253,6 +262,42 @@ export {
	## callback: Function to execute when the given file analyzer is being added.
	global register_analyzer_add_callback: function(tag: Files::Tag, callback: function(f: fa_file, args: AnalyzerArgs));

	## Registers a set of MIME types for an analyzer. If a future file with
	## one of these types is seen, the analyzer will be automatically assigned
	## to parsing it. The function *adds* to all MIME types already registered;
	## it doesn't replace them.
	##
	## tag: The tag of the analyzer.
	##
	## mts: The set of MIME types, each in the form "foo/bar" (case-insensitive).
	##
	## Returns: True if the MIME types were successfully registered.
	global register_for_mime_types: function(tag: Analyzer::Tag, mts: set[string]) : bool;

	## Registers a MIME type for an analyzer. If a future file with this type is seen,
	## the analyzer will be automatically assigned to parsing it. The function *adds*
	## to all MIME types already registered; it doesn't replace them.
	##
	## tag: The tag of the analyzer.
	##
	## mt: The MIME type in the form "foo/bar" (case-insensitive).
	##
	## Returns: True if the MIME type was successfully registered.
	global register_for_mime_type: function(tag: Analyzer::Tag, mt: string) : bool;

	## Returns a set of all MIME types currently registered for a specific analyzer.
	##
	## tag: The tag of the analyzer.
	##
	## Returns: The set of MIME types.
	global registered_mime_types: function(tag: Analyzer::Tag) : set[string];

	## Returns a table of all MIME-type-to-analyzer mappings currently registered.
	##
	## Returns: A table mapping each analyzer to the set of MIME types
	##          registered for it.
	global all_registered_mime_types: function() : table[Analyzer::Tag] of set[string];

	## Event that can be handled to access the Info record as it is sent on
	## to the logging framework.
	global log_files: event(rec: Info);

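A hedged sketch of the registration and logging hooks together (the MIME type and the use of a file analyzer tag here are illustrative assumptions):

event bro_init() &priority=5
	{
	# Assumption: a file analyzer tag is accepted wherever the
	# declarations above say Analyzer::Tag.
	Files::register_for_mime_type(Files::ANALYZER_MD5, "application/x-dosexec");
	}

event Files::log_files(rec: Files::Info)
	{
	# Inspect each record on its way to the logging framework.
	if ( rec?$mime_type && rec$mime_type == "application/x-dosexec" )
		print fmt("executable seen, source: %s", rec?$source ? rec$source : "unknown");
	}
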
@@ -265,6 +310,9 @@ redef record fa_file += {
# Store the callbacks for protocol analyzers that have files.
global registered_protocols: table[Analyzer::Tag] of ProtoRegistration = table();

# Store the MIME type to analyzer mappings.
global mime_types: table[Analyzer::Tag] of set[string];

global analyzer_add_callbacks: table[Files::Tag] of function(f: fa_file, args: AnalyzerArgs) = table();

event bro_init() &priority=5

@@ -287,13 +335,13 @@ function set_info(f: fa_file)
	f$info$source = f$source;
	f$info$duration = f$last_active - f$info$ts;
	f$info$seen_bytes = f$seen_bytes;
	if ( f?$total_bytes )
		f$info$total_bytes = f$total_bytes;
	f$info$missing_bytes = f$missing_bytes;
	f$info$overflow_bytes = f$overflow_bytes;
	if ( f?$is_orig )
		f$info$is_orig = f$is_orig;
	if ( f?$mime_type )
		f$info$mime_type = f$mime_type;
	}

@@ -332,6 +380,15 @@ function add_analyzer(f: fa_file, tag: Files::Tag, args: AnalyzerArgs): bool
	return T;
	}

function add_analyzers_for_mime_type(f: fa_file, mtype: string)
	{
	local dummy_args: AnalyzerArgs;
	local analyzers = __add_analyzers_for_mime_type(f$id, mtype, dummy_args);

	for ( tag in analyzers )
		add f$info$analyzers[Files::analyzer_name(tag)];
	}

function register_analyzer_add_callback(tag: Files::Tag, callback: function(f: fa_file, args: AnalyzerArgs))
	{
	analyzer_add_callbacks[tag] = callback;

@@ -356,6 +413,9 @@ event file_new(f: fa_file) &priority=10
	{
	set_info(f);

	if ( f?$mime_type )
		add_analyzers_for_mime_type(f, f$mime_type);

	if ( enable_reassembler )
		{
		Files::enable_reassembly(f);

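Judging from the check above, reassembly is gated on a redef-able flag; a one-line sketch of turning it off site-wide (the qualified name Files::enable_reassembler is an assumption):

# Assumed &redef const tested by the file_new handler above.
redef Files::enable_reassembler = F;
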
@@ -405,6 +465,41 @@ function register_protocol(tag: Analyzer::Tag, reg: ProtoRegistration): bool
	return result;
	}

function register_for_mime_types(tag: Analyzer::Tag, mime_types: set[string]) : bool
	{
	local rc = T;

	for ( mt in mime_types )
		{
		if ( ! register_for_mime_type(tag, mt) )
			rc = F;
		}

	return rc;
	}

function register_for_mime_type(tag: Analyzer::Tag, mt: string) : bool
	{
	if ( ! __register_for_mime_type(tag, mt) )
		return F;

	if ( tag !in mime_types )
		mime_types[tag] = set();

	add mime_types[tag][mt];
	return T;
	}

function registered_mime_types(tag: Analyzer::Tag) : set[string]
	{
	return tag in mime_types ? mime_types[tag] : set();
	}

function all_registered_mime_types(): table[Analyzer::Tag] of set[string]
	{
	return mime_types;
	}

function describe(f: fa_file): string
	{
	local tag = Analyzer::get_tag(f$source);

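Callers reach the per-protocol description through Files::describe; a minimal sketch:

event file_state_remove(f: fa_file)
	{
	# Prints, e.g., a URL for files carried over HTTP, via the describe()
	# callback registered for the source protocol; empty if none is set.
	print Files::describe(f);
	}
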
@@ -4,6 +4,17 @@
module Input;

export {
	type Event: enum {
		EVENT_NEW = 0,
		EVENT_CHANGED = 1,
		EVENT_REMOVED = 2,
	};

	type Mode: enum {
		MANUAL = 0,
		REREAD = 1,
		STREAM = 2
	};

	## The default input reader used. Defaults to `READER_ASCII`.
	const default_reader = READER_ASCII &redef;

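A hedged sketch of these modes in use: reading a table from disk and re-reading it whenever the file changes (the path and record layout here are made up):

module Example;

type Idx: record {
	ip: addr;
};

type Val: record {
	reason: string;
};

global blocked: table[addr] of Val = table();

event bro_init()
	{
	Input::add_table([$source="/var/db/blocked.tsv",  # hypothetical path
	                  $name="blocked_hosts",
	                  $idx=Idx, $val=Val, $destination=blocked,
	                  $mode=Input::REREAD]);
	}
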
@@ -1,7 +1,5 @@
@load ./main
@load ./postprocessors
@load ./writers/ascii
-@load ./writers/dataseries
@load ./writers/sqlite
-@load ./writers/elasticsearch
@load ./writers/none

@@ -5,9 +5,15 @@

module Log;

# Log::ID and Log::Writer are defined in types.bif due to circular dependencies.

export {
	## Type that defines an ID unique to each log stream. Scripts creating new log
	## streams need to redef this enum to add their own specific log ID. The log ID
	## implicitly determines the default name of the generated log file.
	type Log::ID: enum {
		## Dummy place-holder.
		UNKNOWN
	};

	## If true, local logging is by default enabled for all filters.
	const enable_local_logging = T &redef;

@@ -27,13 +33,13 @@ export {
	const set_separator = "," &redef;

	## String to use for empty fields. This should be different from
	## *unset_field* to make the output unambiguous.
	## Can be overwritten by individual writers.
	const empty_field = "(empty)" &redef;

	## String to use for an unset &optional field.
	## Can be overwritten by individual writers.
	const unset_field = "-" &redef;

	## Type defining the content of a logging stream.
	type Stream: record {

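All of these are &redef, so a site can override them globally; for example:

# Make empty and unset fields visually distinct in every log.
redef Log::empty_field = "EMPTY";
redef Log::unset_field = "(unset)";
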
@@ -26,20 +26,20 @@ export {
	## This option is also available as a per-filter ``$config`` option.
	const use_json = F &redef;

-	## Format of timestamps when writing out JSON. By default, the JSON formatter will
-	## use double values for timestamps which represent the number of seconds from the
-	## UNIX epoch.
+	## Format of timestamps when writing out JSON. By default, the JSON
+	## formatter will use double values for timestamps which represent the
+	## number of seconds from the UNIX epoch.
	const json_timestamps: JSON::TimestampFormat = JSON::TS_EPOCH &redef;

	## If true, include lines with log meta information such as column names
	## with types, the values of ASCII logging options that are in use, and
	## the time when the file was opened and closed (the latter at the end).
	##
	## If writing in JSON format, this is implicitly disabled.
	const include_meta = T &redef;

	## Prefix for lines with meta information.
	##
	## This option is also available as a per-filter ``$config`` option.
	const meta_prefix = "#" &redef;

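A sketch of switching the ASCII writer to JSON output with ISO 8601 timestamps (assuming JSON::TS_ISO8601 is among the available TimestampFormat values):

redef LogAscii::use_json = T;
redef LogAscii::json_timestamps = JSON::TS_ISO8601;
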
@@ -1,60 +0,0 @@
##! Interface for the DataSeries log writer.

module LogDataSeries;

export {
	## Compression to use with the DS output file. Options are:
	##
	## 'none' -- No compression.
	## 'lzf' -- LZF compression (very quick, but leads to larger output files).
	## 'lzo' -- LZO compression (very fast decompression times).
	## 'gz' -- GZIP compression (slower than LZF, but also produces smaller output).
	## 'bz2' -- BZIP2 compression (slower than GZIP, but also produces smaller output).
	const compression = "gz" &redef;

	## The extent buffer size.
	## Larger values here lead to better compression and more efficient writes,
	## but also increase the lag between the time events are received and
	## the time they are actually written to disk.
	const extent_size = 65536 &redef;

	## Should we dump the XML schema we use for this DS file to disk?
	## If yes, the XML schema shares the name of the logfile, but has
	## an XML ending.
	const dump_schema = F &redef;

	## How many threads should DataSeries spawn to perform compression?
	## Note that this dictates the number of threads per log stream. If
	## you're using a lot of streams, you may want to keep this number
	## relatively small.
	##
	## Default value is 1, which will spawn one thread / stream.
	##
	## Maximum is 128, minimum is 1.
	const num_threads = 1 &redef;

	## Should time be stored as an integer or a double?
	## Storing time as a double leads to possible precision issues and
	## can (significantly) increase the size of the resulting DS log.
	## That said, timestamps stored in double form are consistent
	## with the rest of Bro, including the standard ASCII log. Hence, we
	## use them by default.
	const use_integer_for_time = F &redef;
}

# Default function to postprocess a rotated DataSeries log file. It moves the
# rotated file to a new name that includes a timestamp with the opening time,
# and then runs the writer's default postprocessor command on it.
function default_rotation_postprocessor_func(info: Log::RotationInfo) : bool
	{
	# Move file to name including both opening and closing time.
	local dst = fmt("%s.%s.ds", info$path,
	                strftime(Log::default_rotation_date_format, info$open));

	system(fmt("/bin/mv %s %s", info$fname, dst));

	# Run default postprocessor.
	return Log::run_rotation_postprocessor_cmd(info, dst);
	}

redef Log::default_rotation_postprocessors += { [Log::WRITER_DATASERIES] = default_rotation_postprocessor_func };

@@ -1,48 +0,0 @@
##! Log writer for sending logs to an ElasticSearch server.
##!
##! Note: This module is in testing and is not yet considered stable!
##!
##! There is one known memory issue. If your elasticsearch server is
##! running slowly and taking too long to return from bulk insert
##! requests, the message queue to the writer thread will continue
##! growing larger and larger giving the appearance of a memory leak.

module LogElasticSearch;

export {
	## Name of the ES cluster.
	const cluster_name = "elasticsearch" &redef;

	## ES server.
	const server_host = "127.0.0.1" &redef;

	## ES port.
	const server_port = 9200 &redef;

	## Name of the ES index.
	const index_prefix = "bro" &redef;

	## The ES type prefix comes before the name of the related log.
	## e.g. prefix = "bro\_" would create types of bro_dns, bro_software, etc.
	const type_prefix = "" &redef;

	## The time before an ElasticSearch transfer will timeout. Note that
	## the fractional part of the timeout will be ignored. In particular,
	## time specifications less than a second result in a timeout value of
	## 0, which means "no timeout."
	const transfer_timeout = 2secs;

	## The batch size is the number of messages that will be queued up before
	## they are sent to be bulk indexed.
	const max_batch_size = 1000 &redef;

	## The maximum amount of wall-clock time that is allowed to pass without
	## finishing a bulk log send. This represents the maximum delay you
	## would like to have with your logs before they are sent to ElasticSearch.
	const max_batch_interval = 1min &redef;

	## The maximum byte size for a buffered JSON string to send to the bulk
	## insert API.
	const max_byte_size = 1024 * 1024 &redef;
}

@@ -20,7 +20,8 @@ export {
	## category along with the specific notice separating words with
	## underscores and using leading capitals on each word except for
	## abbreviations which are kept in all capitals. For example,
-	## SSH::Login is for heuristically guessed successful SSH logins.
+	## SSH::Password_Guessing is for hosts that have crossed a threshold of
+	## heuristically determined failed SSH logins.
	type Type: enum {
		## Notice reporting a count of how often a notice occurred.
		Tally,

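Following that convention, a sketch of declaring a site-local notice type (module and notice name are hypothetical):

module Heuristics;

export {
	redef enum Notice::Type += {
		## Hypothetical notice following the naming convention above.
		Suspicious_Login_Burst,
	};
}
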
@@ -71,7 +71,7 @@ export {
		## to be logged has occurred.
		ts: time &log;
		## A unique identifier of the connection which triggered the
-		## signature match event
+		## signature match event.
		uid: string &log &optional;
		## The host which triggered the signature match event.
		src_addr: addr &log &optional;

@@ -75,6 +75,13 @@ type addr_vec: vector of addr;
## directly and then remove this alias.
type table_string_of_string: table[string] of string;

## A set of file analyzer tags.
##
## .. todo:: We need this type definition only for declaring builtin functions
##    via ``bifcl``. We should extend ``bifcl`` to understand composite types
##    directly and then remove this alias.
type files_tag_set: set[Files::Tag];

## A structure indicating a MIME type and strength of a match against
## file magic signatures.
##

@@ -2479,8 +2486,7 @@ type http_message_stat: record {
	header_length: count;
};

-## Maximum number of HTTP entity data delivered to events. The amount of data
-## can be limited for better performance, zero disables truncation.
+## Maximum number of HTTP entity data delivered to events.
##
## .. bro:see:: http_entity_data skip_http_entity_data skip_http_data
global http_entity_data_delivery_size = 1500 &redef;

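Since the limit is &redef, a site can raise it, e.g.:

# Deliver up to 4 KiB of entity data per event; per the removed doc
# line above, 0 disables truncation entirely.
redef http_entity_data_delivery_size = 4096;
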
@@ -2732,6 +2738,7 @@ type ModbusRegisters: vector of count;
type ModbusHeaders: record {
	tid: count;
	pid: count;
	len: count;
	uid: count;
	function_code: count;
};

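For reference, a sketch of consuming these headers from the generic Modbus message event (assuming the stock modbus_message signature):

event modbus_message(c: connection, headers: ModbusHeaders, is_orig: bool)
	{
	# Print the transaction id and function code of each Modbus PDU.
	print fmt("modbus tid=%d function=%d", headers$tid, headers$function_code);
	}
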
@@ -3357,9 +3364,6 @@ const global_hash_seed: string = "" &redef;
## The maximum is currently 128 bits.
const bits_per_uid: count = 96 &redef;

-# Load BiFs defined by plugins.
-@load base/bif/plugins
-
# Load these frameworks here because they use fairly deep integration with
# BiFs and script-land defined types.
@load base/frameworks/logging

@@ -3368,3 +3372,7 @@ const bits_per_uid: count = 96 &redef;
@load base/frameworks/files

@load base/bif

+# Load BiFs defined by plugins.
+@load base/bif/plugins

@@ -47,13 +47,13 @@ redef record connection += {
const ports = { 67/udp, 68/udp };
redef likely_server_ports += { 67/udp };

-event bro_init()
+event bro_init() &priority=5
	{
	Log::create_stream(DHCP::LOG, [$columns=Info, $ev=log_dhcp]);
	Analyzer::register_for_ports(Analyzer::ANALYZER_DHCP, ports);
	}

-event dhcp_ack(c: connection, msg: dhcp_msg, mask: addr, router: dhcp_router_list, lease: interval, serv_addr: addr, host_name: string)
+event dhcp_ack(c: connection, msg: dhcp_msg, mask: addr, router: dhcp_router_list, lease: interval, serv_addr: addr, host_name: string) &priority=5
	{
	local info: Info;
	info$ts = network_time();

@@ -71,6 +71,9 @@ event dhcp_ack(c: connection, msg: dhcp_msg, mask: addr, router: dhcp_router_list, lease: interval, serv_addr: addr, host_name: string)
	info$assigned_ip = c$id$orig_h;

	c$dhcp = info;
	}

event dhcp_ack(c: connection, msg: dhcp_msg, mask: addr, router: dhcp_router_list, lease: interval, serv_addr: addr, host_name: string) &priority=-5
	{
	Log::write(DHCP::LOG, c$dhcp);
	}

@@ -58,31 +58,31 @@ export {
		## Indicates if client activity has been seen, but not yet logged.
		has_client_activity: bool &default=F;
	};

	type State: record {
		helo: string &optional;
		## Count the number of individual messages transmitted during
		## this SMTP session. Note, this is not the number of
		## recipients, but the number of message bodies transferred.
		messages_transferred: count &default=0;

		pending_messages: set[Info] &optional;
	};

	## Direction to capture the full "Received from" path.
	## REMOTE_HOSTS - only capture the path until an internal host is found.
	## LOCAL_HOSTS - only capture the path until the external host is discovered.
	## ALL_HOSTS - always capture the entire path.
	## NO_HOSTS - never capture the path.
	const mail_path_capture = ALL_HOSTS &redef;

	## Create an extremely shortened representation of a log line.
	global describe: function(rec: Info): string;

	global log_smtp: event(rec: Info);
}

redef record connection += {
	smtp: Info &optional;
	smtp_state: State &optional;
};

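The mail_path_capture options above are &redef-able; a site that only wants the path up to its own hosts might use:

# Capture the Received path only until an external host appears.
redef SMTP::mail_path_capture = LOCAL_HOSTS;
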
@@ -95,7 +95,7 @@ event bro_init() &priority=5
	Log::create_stream(SMTP::LOG, [$columns=SMTP::Info, $ev=log_smtp]);
	Analyzer::register_for_ports(Analyzer::ANALYZER_SMTP, ports);
	}

function find_address_in_smtp_header(header: string): string
	{
	local ips = find_ip_addresses(header);

@@ -116,17 +116,17 @@ function new_smtp_log(c: connection): Info
	l$ts=network_time();
	l$uid=c$uid;
	l$id=c$id;
	# The messages_transferred count isn't incremented until the message is
	# finished, so we need to increment the count by 1 here.
	l$trans_depth = c$smtp_state$messages_transferred+1;

	if ( c$smtp_state?$helo )
		l$helo = c$smtp_state$helo;

	# The path will always end with the hosts involved in this connection.
	# The lower values in the vector are the end of the path.
	l$path = vector(c$id$resp_h, c$id$orig_h);

	return l;
	}

@@ -134,7 +134,7 @@ function set_smtp_session(c: connection)
	{
	if ( ! c?$smtp_state )
		c$smtp_state = [];

	if ( ! c?$smtp )
		c$smtp = new_smtp_log(c);
	}

@@ -142,17 +142,17 @@ function set_smtp_session(c: connection)
function smtp_message(c: connection)
	{
	if ( c$smtp$has_client_activity )
		{
		Log::write(SMTP::LOG, c$smtp);
		c$smtp = new_smtp_log(c);
		}
	}

event smtp_request(c: connection, is_orig: bool, command: string, arg: string) &priority=5
	{
	set_smtp_session(c);
	local upper_command = to_upper(command);

	if ( upper_command != "QUIT" )
		c$smtp$has_client_activity = T;

	if ( upper_command == "HELO" || upper_command == "EHLO" )
		{
		c$smtp_state$helo = arg;

@@ -161,23 +161,28 @@ event smtp_request(c: connection, is_orig: bool, command: string, arg: string) &priority=5
	else if ( upper_command == "RCPT" && /^[tT][oO]:/ in arg )
		{
		if ( ! c$smtp?$rcptto )
			c$smtp$rcptto = set();
		add c$smtp$rcptto[split1(arg, /:[[:blank:]]*/)[2]];
		c$smtp$has_client_activity = T;
		}

	else if ( upper_command == "MAIL" && /^[fF][rR][oO][mM]:/ in arg )
		{
		# Flush last message in case we didn't see the server's acknowledgement.
		smtp_message(c);

		local partially_done = split1(arg, /:[[:blank:]]*/)[2];
		c$smtp$mailfrom = split1(partially_done, /[[:blank:]]?/)[1];
		c$smtp$has_client_activity = T;
		}
	}

event smtp_reply(c: connection, is_orig: bool, code: count, cmd: string,
		msg: string, cont_resp: bool) &priority=5
	{
	set_smtp_session(c);

	# This continually overwrites, but we want the last reply,
	# so this actually works fine.
	c$smtp$last_reply = fmt("%d %s", code, msg);

@@ -198,7 +203,6 @@ event smtp_reply(c: connection, is_orig: bool, code: count, cmd: string,
event mime_one_header(c: connection, h: mime_header_rec) &priority=5
	{
	if ( ! c?$smtp ) return;
-	c$smtp$has_client_activity = T;

	if ( h$name == "MESSAGE-ID" )
		c$smtp$msg_id = h$value;

@@ -241,19 +245,19 @@ event mime_one_header(c: connection, h: mime_header_rec) &priority=5
		if ( 1 in addresses )
			c$smtp$x_originating_ip = to_addr(addresses[1]);
		}

	else if ( h$name == "X-MAILER" ||
	          h$name == "USER-AGENT" ||
	          h$name == "X-USER-AGENT" )
		c$smtp$user_agent = h$value;
	}

# This event handler builds the "Received From" path by reading the
# headers in the mail.
event mime_one_header(c: connection, h: mime_header_rec) &priority=3
	{
	# If we've decided that we're done watching the received headers for
	# whatever reason, we're done. Could be due to only watching until
	# local addresses are seen in the received from headers.
	if ( ! c?$smtp || h$name != "RECEIVED" || ! c$smtp$process_received_from )
		return;

@@ -263,7 +267,7 @@ event mime_one_header(c: connection, h: mime_header_rec) &priority=3
		return;
	local ip = to_addr(text_ip);

	if ( ! addr_matches_host(ip, mail_path_capture) &&
	     ! Site::is_private_addr(ip) )
		{
		c$smtp$process_received_from = F;

@@ -281,7 +285,10 @@ event connection_state_remove(c: connection) &priority=-5
event smtp_starttls(c: connection) &priority=5
	{
	if ( c?$smtp )
		{
		c$smtp$tls = T;
		c$smtp$has_client_activity = T;
		}
	}

function describe(rec: Info): string

@@ -26,6 +26,21 @@ export {
	const V2_CLIENT_MASTER_KEY = 302;
	const V2_SERVER_HELLO = 304;

	## TLS Handshake types:
	const HELLO_REQUEST = 0;
	const CLIENT_HELLO = 1;
	const SERVER_HELLO = 2;
	const SESSION_TICKET = 4; # RFC 5077
	const CERTIFICATE = 11;
	const SERVER_KEY_EXCHANGE = 12;
	const CERTIFICATE_REQUEST = 13;
	const SERVER_HELLO_DONE = 14;
	const CERTIFICATE_VERIFY = 15;
	const CLIENT_KEY_EXCHANGE = 16;
	const FINISHED = 20;
	const CERTIFICATE_URL = 21; # RFC 3546
	const CERTIFICATE_STATUS = 22; # RFC 3546

	## Mapping between numeric codes and human readable strings for alert
	## levels.
	const alert_levels: table[count] of string = {

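For context, a sketch of matching on these constants (assuming the generic ssl_handshake_message event with this signature):

event ssl_handshake_message(c: connection, is_orig: bool, msg_type: count, length: count)
	{
	# React to ClientHello messages using the constants above.
	if ( is_orig && msg_type == SSL::CLIENT_HELLO )
		print fmt("ClientHello from %s", c$id$orig_h);
	}
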
@@ -94,6 +109,10 @@ export {
		[16] = "application_layer_protocol_negotiation",
		[17] = "status_request_v2",
		[18] = "signed_certificate_timestamp",
		[19] = "client_certificate_type",
		[20] = "server_certificate_type",
		[21] = "padding", # temporary till 2015-03-12
		[22] = "encrypt_then_mac", # temporary till 2015-06-05
		[35] = "SessionTicket TLS",
		[40] = "extended_random",
		[13172] = "next_protocol_negotiation",