Merge remote-tracking branch 'origin/topic/dnthayer/doc-changes-for-2.2'

Snapshotting the work in this branch. I'll merge it again later as we
get closer to the release.

* origin/topic/dnthayer/doc-changes-for-2.2: (29 commits)
  Add README files for base/protocols
  Fix incorrect uses of reST directives
  Fix typos and formatting in the BiFs docs
  Fix typos and formatting in the base/utils docs
  Fix typos and formatting in the other protocol docs
  Fix typos and formatting in the ssl protocol docs
  Fix typos and formatting in the http protocol docs
  Fix typos and formatting in the ftp protocol docs
  Fix typos and formatting in the dns protocol docs
  Fix typos and formatting in the dhcp protocol docs
  Adjust line numbers to match changes in conn/main.bro
  Fix typos and formatting in the conn protocol docs
  Update FreeBSD install instructions
  Improvements to file analysis docs
  Add README files for most Bro frameworks
  Fix typos and formatting in various other framework docs
  Fix typos and formatting in the software framework docs
  Fix typos and formatting in the sumstats docs
  Fix typos and formatting in the packet filter docs
  Fix typos and formatting in the logging framework docs
  ...
Robin Sommer 2013-10-18 13:47:13 -07:00
commit 615cca8baa
132 changed files with 1001 additions and 695 deletions

View file

@@ -0,0 +1,3 @@
+The analyzer framework allows to dynamically enable or disable Bro's
+protocol analyzers, as well as to manage the well-known ports which
+automatically activate a particular analyzer for new connections.

View file

@@ -5,8 +5,8 @@
 ##! particular analyzer for new connections.
 ##!
 ##! Protocol analyzers are identified by unique tags of type
-##! :bro:type:`Analyzer::Tag`, such as :bro:enum:`Analyzer::ANALYZER_HTTP` and
-##! :bro:enum:`Analyzer::ANALYZER_HTTP`. These tags are defined internally by
+##! :bro:type:`Analyzer::Tag`, such as :bro:enum:`Analyzer::ANALYZER_HTTP`.
+##! These tags are defined internally by
 ##! the analyzers themselves, and documented in their analyzer-specific
 ##! description along with the events that they generate.
@@ -15,8 +15,8 @@
 module Analyzer;
 export {
-## If true, all available analyzers are initially disabled at startup. One
-## can then selectively enable them with
+## If true, all available analyzers are initially disabled at startup.
+## One can then selectively enable them with
 ## :bro:id:`Analyzer::enable_analyzer`.
 global disable_all = F &redef;
@@ -45,7 +45,7 @@ export {
 ##
 ## ports: The set of well-known ports to associate with the analyzer.
 ##
-## Returns: True if the ports were sucessfully registered.
+## Returns: True if the ports were successfully registered.
 global register_for_ports: function(tag: Analyzer::Tag, ports: set[port]) : bool;
 ## Registers an individual well-known port for an analyzer. If a future
@@ -57,7 +57,7 @@ export {
 ##
 ## p: The well-known port to associate with the analyzer.
 ##
-## Returns: True if the port was sucessfully registered.
+## Returns: True if the port was successfully registered.
 global register_for_port: function(tag: Analyzer::Tag, p: port) : bool;
 ## Returns a set of all well-known ports currently registered for a
@@ -88,8 +88,8 @@ export {
 ## Returns: The analyzer tag corresponding to the name.
 global get_tag: function(name: string): Analyzer::Tag;
-## Schedules an analyzer for a future connection originating from a given IP
-## address and port.
+## Schedules an analyzer for a future connection originating from a
+## given IP address and port.
 ##
 ## orig: The IP address originating a connection in the future.
 ## 0.0.0.0 can be used as a wildcard to match any originator address.
@@ -103,7 +103,7 @@ export {
 ## tout: A timeout interval after which the scheduling request will be
 ## discarded if the connection has not yet been seen.
 ##
-## Returns: True if succesful.
+## Returns: True if successful.
 global schedule_analyzer: function(orig: addr, resp: addr, resp_p: port,
 analyzer: Analyzer::Tag, tout: interval) : bool;
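
A minimal sketch of how these functions are typically called (the port value and the FTP data analyzer tag are illustrative assumptions, not part of this commit)::

    event bro_init()
        {
        # Also treat port 8080 as HTTP.
        Analyzer::register_for_port(Analyzer::ANALYZER_HTTP, 8080/tcp);

        # Expect an FTP data connection to this responder address/port
        # within the next five minutes.
        Analyzer::schedule_analyzer(0.0.0.0, 192.168.1.1, 20/tcp,
                                    Analyzer::ANALYZER_FTP_DATA, 5min);
        }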

View file

@@ -0,0 +1,2 @@
+The cluster framework provides for establishing and controlling a cluster
+of Bro instances.

View file

@@ -39,7 +39,8 @@ export {
 ## The node type doing all the actual traffic analysis.
 WORKER,
 ## A node acting as a traffic recorder using the
-## `Time Machine <http://tracker.bro.org/time-machine>`_ software.
+## `Time Machine <http://bro.org/community/time-machine.html>`_
+## software.
 TIME_MACHINE,
 };
@@ -58,7 +59,7 @@ export {
 ## Events raised by workers and handled by a manager.
 const worker2manager_events = /(TimeMachine::command|Drop::.*)/ &redef;
-## Events raised by workers and handled by proxies..
+## Events raised by workers and handled by proxies.
 const worker2proxy_events = /EMPTY/ &redef;
 ## Events raised by TimeMachine instances and handled by a manager.
@@ -73,14 +74,14 @@ export {
 ## Record type to indicate a node in a cluster.
 type Node: record {
-## Identifies the type of cluster node in this node's configuration.
+## Identifies the type of cluster node in this node's configuration.
 node_type: NodeType;
 ## The IP address of the cluster node.
 ip: addr;
 ## If the *ip* field is a non-global IPv6 address, this field
 ## can specify a particular :rfc:`4007` ``zone_id``.
 zone_id: string &default="";
-## The port to which the this local node can connect when
+## The port to which this local node can connect when
 ## establishing communication.
 p: port;
 ## Identifier for the interface a worker is sniffing.
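
For reference, a cluster layout populates Cluster::nodes with records of this type; a two-node sketch (the addresses, and the *manager*/*workers* fields not shown in this hunk, are assumptions)::

    redef Cluster::nodes = {
        ["manager"]  = [$node_type=Cluster::MANAGER, $ip=10.0.0.1, $p=47761/tcp,
                        $workers=set("worker-1")],
        ["worker-1"] = [$node_type=Cluster::WORKER,  $ip=10.0.0.2, $p=47763/tcp,
                        $interface="eth0", $manager="manager"],
    };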

View file

@@ -19,6 +19,6 @@ redef Log::default_rotation_postprocessor_cmd = "delete-log";
 ## Record all packets into trace file.
 ##
 ## Note that this only indicates that *if* we are recording packets, we want all
-## of them (rather than just those the core deems sufficiently important). Setting
-## this does not turn recording on. Use '-w <trace>' for that.
+## of them (rather than just those the core deems sufficiently important).
+## Setting this does not turn recording on. Use '-w <trace>' for that.
 redef record_all_packets = T;

View file

@@ -0,0 +1,2 @@
+The communication framework facilitates connecting to remote Bro or
+Broccoli instances to share state and transfer events.

View file

@@ -42,10 +42,11 @@ export {
 type Info: record {
 ## The network time at which a communication event occurred.
 ts: time &log;
-## The peer name (if any) with which a communication event is concerned.
+## The peer name (if any) with which a communication event is
+## concerned.
 peer: string &log &optional;
-## Where the communication event message originated from, that is,
-## either from the scripting layer or inside the Bro process.
+## Where the communication event message originated from, that
+## is, either from the scripting layer or inside the Bro process.
 src_name: string &log &optional;
 ## .. todo:: currently unused.
 connected_peer_desc: string &log &optional;
@@ -71,8 +72,8 @@ export {
 ## can specify a particular :rfc:`4007` ``zone_id``.
 zone_id: string &optional;
-## Port of the remote Bro communication endpoint if we are initiating
-## the connection based on the :bro:id:`connect` field.
+## Port of the remote Bro communication endpoint if we are
+## initiating the connection (based on the *connect* field).
 p: port &optional;
 ## When accepting a connection, the configuration only
@@ -87,7 +88,7 @@ export {
 events: pattern &optional;
 ## Whether we are going to connect (rather than waiting
-## for the other sie to connect to us).
+## for the other side to connect to us).
 connect: bool &default = F;
 ## If disconnected, reconnect after this many seconds.
@@ -103,13 +104,14 @@ export {
 request_logs: bool &default = F;
 ## When performing state synchronization, whether we consider
-## our state to be authoritative. If so, we will send the peer
-## our current set when the connection is set up.
-## (Only one side can be authoritative)
+## our state to be authoritative (only one side can be
+## authoritative). If so, we will send the peer our current
+## set when the connection is set up.
 auth: bool &default = F;
 ## If not set, no capture filter is sent.
-## If set to "", the default capture filter is sent.
+## If set to an empty string, then the default capture filter
+## is sent.
 capture_filter: string &optional;
 ## Whether to use SSL-based communication.
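
A peer is typically declared by adding to Communication::nodes; a sketch using the fields documented above (the host address, event pattern, and peer name are illustrative)::

    redef Communication::nodes += {
        ["peer-1"] = [$host=192.168.0.2, $p=47757/tcp, $connect=T,
                      $events=/my_event.*/],
    };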

View file

@@ -0,0 +1,3 @@
+The control framework provides the foundation for providing "commands"
+that can be taken remotely at runtime to modify a running Bro instance
+or collect information from the running instance.

View file

@@ -57,7 +57,8 @@ export {
 ## Returns the current net_stats.
 global net_stats_response: event(s: string);
-## Inform the remote Bro instance that it's configuration may have been updated.
+## Inform the remote Bro instance that its configuration may have been
+## updated.
 global configuration_update_request: event();
 ## This event is a wrapper and alias for the
 ## :bro:id:`Control::configuration_update_request` event.

View file

@@ -0,0 +1,2 @@
+The DPD (dynamic protocol detection) activates port-independent protocol
+detection and selectively disables analyzers if protocol violations occur.

View file

@@ -0,0 +1,3 @@
+The file analysis framework provides an interface for driving the analysis
+of files, possibly independent of any network protocol over which they're
+transported.

View file

@@ -14,10 +14,11 @@ export {
 LOG
 };
-## A structure which represents a desired type of file analysis.
+## A structure which parameterizes a type of file analysis.
 type AnalyzerArgs: record {
 ## An event which will be generated for all new file contents,
-## chunk-wise. Used when *tag* is
+## chunk-wise. Used when *tag* (in the
+## :bro:see:`Files::add_analyzer` function) is
 ## :bro:see:`Files::ANALYZER_DATA_EVENT`.
 chunk_event: event(f: fa_file, data: string, off: count) &optional;
@@ -47,12 +48,12 @@ export {
 ## the data traveled to.
 rx_hosts: set[addr] &log;
-## Connection UIDS over which the file was transferred.
+## Connection UIDs over which the file was transferred.
 conn_uids: set[string] &log;
-## An identification of the source of the file data. E.g. it may be
-## a network protocol over which it was transferred, or a local file
-## path which was read, or some other input source.
+## An identification of the source of the file data. E.g. it
+## may be a network protocol over which it was transferred, or a
+## local file path which was read, or some other input source.
 source: string &log &optional;
 ## A value to represent the depth of this file in relation
@@ -64,9 +65,10 @@ export {
 ## A set of analysis types done during the file analysis.
 analyzers: set[string] &log;
-## A mime type provided by libmagic against the *bof_buffer*, or
-## in the cases where no buffering of the beginning of file occurs,
-## an initial guess of the mime type based on the first data seen.
+## A mime type provided by libmagic against the *bof_buffer*
+## field of :bro:see:`fa_file`, or in the cases where no
+## buffering of the beginning of file occurs, an initial
+## guess of the mime type based on the first data seen.
 mime_type: string &log &optional;
 ## A filename for the file if one is available from the source
@@ -79,12 +81,12 @@ export {
 ## If the source of this file is a network connection, this field
 ## indicates if the data originated from the local network or not as
-## determined by the configured bro:see:`Site::local_nets`.
+## determined by the configured :bro:see:`Site::local_nets`.
 local_orig: bool &log &optional;
 ## If the source of this file is a network connection, this field
-## indicates if the file is being sent by the originator of the connection
-## or the responder.
+## indicates if the file is being sent by the originator of the
+## connection or the responder.
 is_orig: bool &log &optional;
 ## Number of bytes provided to the file analysis engine for the file.
@@ -116,15 +118,15 @@ export {
 ## The salt concatenated to unique file handle strings generated by
 ## :bro:see:`get_file_handle` before hashing them in to a file id
 ## (the *id* field of :bro:see:`fa_file`).
-## Provided to help mitigate the possiblility of manipulating parts of
+## Provided to help mitigate the possibility of manipulating parts of
 ## network connections that factor in to the file handle in order to
 ## generate two handles that would hash to the same file id.
 const salt = "I recommend changing this." &redef;
 ## Sets the *timeout_interval* field of :bro:see:`fa_file`, which is
 ## used to determine the length of inactivity that is allowed for a file
-## before internal state related to it is cleaned up. When used within a
-## :bro:see:`file_timeout` handler, the analysis will delay timing out
+## before internal state related to it is cleaned up. When used within
+## a :bro:see:`file_timeout` handler, the analysis will delay timing out
 ## again for the period specified by *t*.
 ##
 ## f: the file.
@@ -132,7 +134,7 @@ export {
 ## t: the amount of time the file can remain inactive before discarding.
 ##
 ## Returns: true if the timeout interval was set, or false if analysis
-## for the *id* isn't currently active.
+## for the file isn't currently active.
 global set_timeout_interval: function(f: fa_file, t: interval): bool;
 ## Adds an analyzer to the analysis of a given file.
@@ -144,7 +146,7 @@ export {
 ## args: any parameters the analyzer takes.
 ##
 ## Returns: true if the analyzer will be added, or false if analysis
-## for the *id* isn't currently active or the *args*
+## for the file isn't currently active or the *args*
 ## were invalid for the analyzer type.
 global add_analyzer: function(f: fa_file,
 tag: Files::Tag,
@@ -154,10 +156,12 @@ export {
 ##
 ## f: the file.
 ##
+## tag: the analyzer type.
+##
 ## args: the analyzer (type and args) to remove.
 ##
 ## Returns: true if the analyzer will be removed, or false if analysis
-## for the *id* isn't currently active.
+## for the file isn't currently active.
 global remove_analyzer: function(f: fa_file,
 tag: Files::Tag,
 args: AnalyzerArgs &default=AnalyzerArgs()): bool;
@@ -167,11 +171,12 @@ export {
 ## f: the file.
 ##
 ## Returns: true if analysis for the given file will be ignored for the
-## rest of it's contents, or false if analysis for the *id*
+## rest of its contents, or false if analysis for the file
 ## isn't currently active.
 global stop: function(f: fa_file): bool;
-## Translates an file analyzer enum value to a string with the analyzer's name.
+## Translates a file analyzer enum value to a string with the
+## analyzer's name.
 ##
 ## tag: The analyzer tag.
 ##
@@ -183,7 +188,7 @@ export {
 ##
 ## f: The file to be described.
 ##
-## Returns a text description regarding metadata of the file.
+## Returns: a text description regarding metadata of the file.
 global describe: function(f: fa_file): string;
 type ProtoRegistration: record {
@@ -198,7 +203,7 @@ export {
 &default=function(f: fa_file): string { return ""; };
 };
-## Register callbacks for protocols that work with the Files framework.
+## Register callbacks for protocols that work with the Files framework.
 ## The callbacks must uniquely identify a file and each protocol can
 ## only have a single callback registered for it.
 ##
@@ -209,10 +214,10 @@ export {
 ## Returns: true if the protocol being registered was not previously registered.
 global register_protocol: function(tag: Analyzer::Tag, reg: ProtoRegistration): bool;
-## Register a callback for file analyzers to use if they need to do some manipulation
-## when they are being added to a file before the core code takes over. This is
-## unlikely to be interesting for users and should only be called by file analyzer
-## authors but it *not required*.
+## Register a callback for file analyzers to use if they need to do some
+## manipulation when they are being added to a file before the core code
+## takes over. This is unlikely to be interesting for users and should
+## only be called by file analyzer authors but is *not required*.
 ##
 ## tag: Tag for the file analyzer.
 ##
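
Typical use of this interface is to attach analyzers from a file_new handler; a small sketch (the MD5 analyzer tag is an illustrative assumption about the available Files::ANALYZER_* enums)::

    event file_new(f: fa_file)
        {
        # Compute an MD5 of every file and extend its inactivity timeout.
        Files::add_analyzer(f, Files::ANALYZER_MD5);
        Files::set_timeout_interval(f, 2min);
        }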

View file

@@ -0,0 +1,2 @@
+The input framework provides a way to read previously stored data either as
+an event stream or into a Bro table.

View file

@@ -33,45 +33,45 @@ export {
 ## that contain types that are not supported (at the moment
 ## file and function). If true, the input framework will
 ## warn in these cases, but continue. If false, it will
-## abort. Defaults to false (abort)
+## abort. Defaults to false (abort).
 const accept_unsupported_types = F &redef;
 ## TableFilter description type used for the `table` method.
 type TableDescription: record {
-## Common definitions for tables and events
+# Common definitions for tables and events
 ## String that allows the reader to find the source.
 ## For `READER_ASCII`, this is the filename.
 source: string;
-## Reader to use for this stream
+## Reader to use for this stream.
 reader: Reader &default=default_reader;
-## Read mode to use for this stream
+## Read mode to use for this stream.
 mode: Mode &default=default_mode;
-## Descriptive name. Used to remove a stream at a later time
+## Descriptive name. Used to remove a stream at a later time.
 name: string;
 # Special definitions for tables
-## Table which will receive the data read by the input framework
+## Table which will receive the data read by the input framework.
 destination: any;
-## Record that defines the values used as the index of the table
+## Record that defines the values used as the index of the table.
 idx: any;
-## Record that defines the values used as the elements of the table
-## If val is undefined, destination has to be a set.
+## Record that defines the values used as the elements of the table.
+## If this is undefined, then *destination* has to be a set.
 val: any &optional;
-## Defines if the value of the table is a record (default), or a single value. Val
-## can only contain one element when this is set to false.
+## Defines if the value of the table is a record (default), or a single value.
+## When this is set to false, then *val* can only contain one element.
 want_record: bool &default=T;
 ## The event that is raised each time a value is added to, changed in or removed
 ## from the table. The event will receive an Input::Event enum as the first
-## argument, the idx record as the second argument and the value (record) as the
+## argument, the *idx* record as the second argument and the value (record) as the
 ## third argument.
 ev: any &optional; # event containing idx, val as values.
@@ -88,19 +88,19 @@ export {
 ## EventFilter description type used for the `event` method.
 type EventDescription: record {
-## Common definitions for tables and events
+# Common definitions for tables and events
 ## String that allows the reader to find the source.
 ## For `READER_ASCII`, this is the filename.
 source: string;
-## Reader to use for this steam
+## Reader to use for this stream.
 reader: Reader &default=default_reader;
-## Read mode to use for this stream
+## Read mode to use for this stream.
 mode: Mode &default=default_mode;
-## Descriptive name. Used to remove a stream at a later time
+## Descriptive name. Used to remove a stream at a later time.
 name: string;
 # Special definitions for events
@@ -108,8 +108,8 @@ export {
 ## Record describing the fields to be retrieved from the source input.
 fields: any;
-## If want_record if false, the event receives each value in fields as a separate argument.
-## If it is set to true (default), the event receives all fields in a single record value.
+## If this is false, the event receives each value in fields as a separate argument.
+## If this is set to true (default), the event receives all fields in a single record value.
 want_record: bool &default=T;
 ## The event that is raised each time a new line is received from the reader.
@@ -122,23 +122,23 @@ export {
 config: table[string] of string &default=table();
 };
-## A file analyis input stream type used to forward input data to the
+## A file analysis input stream type used to forward input data to the
 ## file analysis framework.
 type AnalysisDescription: record {
 ## String that allows the reader to find the source.
 ## For `READER_ASCII`, this is the filename.
 source: string;
-## Reader to use for this steam. Compatible readers must be
+## Reader to use for this stream. Compatible readers must be
 ## able to accept a filter of a single string type (i.e.
 ## they read a byte stream).
 reader: Reader &default=Input::READER_BINARY;
-## Read mode to use for this stream
+## Read mode to use for this stream.
 mode: Mode &default=default_mode;
 ## Descriptive name that uniquely identifies the input source.
-## Can be used used to remove a stream at a later time.
+## Can be used to remove a stream at a later time.
 ## This will also be used for the unique *source* field of
 ## :bro:see:`fa_file`. Most of the time, the best choice for this
 ## field will be the same value as the *source* field.
@@ -150,38 +150,44 @@ export {
 config: table[string] of string &default=table();
 };
-## Create a new table input from a given source. Returns true on success.
+## Create a new table input from a given source.
+##
+## description: `TableDescription` record describing the source.
+##
+## Returns: true on success.
 global add_table: function(description: Input::TableDescription) : bool;
-## Create a new event input from a given source. Returns true on success.
+## Create a new event input from a given source.
 ##
-## description: `TableDescription` record describing the source.
+## description: `EventDescription` record describing the source.
 ##
+## Returns: true on success.
 global add_event: function(description: Input::EventDescription) : bool;
 ## Create a new file analysis input from a given source. Data read from
 ## the source is automatically forwarded to the file analysis framework.
 ##
-## description: A record describing the source
+## description: A record describing the source.
 ##
-## Returns: true on sucess.
+## Returns: true on success.
 global add_analysis: function(description: Input::AnalysisDescription) : bool;
-## Remove a input stream. Returns true on success and false if the named stream was
-## not found.
+## Remove an input stream.
 ##
-## id: string value identifying the stream to be removed
+## id: string value identifying the stream to be removed.
 ##
+## Returns: true on success and false if the named stream was not found.
 global remove: function(id: string) : bool;
 ## Forces the current input to be checked for changes.
-## Returns true on success and false if the named stream was not found
 ##
-## id: string value identifying the stream
+## id: string value identifying the stream.
+##
+## Returns: true on success and false if the named stream was not found.
 global force_update: function(id: string) : bool;
-## Event that is called, when the end of a data source has been reached, including
-## after an update.
+## Event that is called when the end of a data source has been reached,
+## including after an update.
 global end_of_data: event(name: string, source:string);
 }
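
A minimal table-reading sketch following the descriptions above (the file name and record fields are illustrative)::

    type Idx: record {
        ip: addr;
    };
    type Val: record {
        reason: string;
    };

    global blacklist: table[addr] of Val = table();

    event bro_init()
        {
        Input::add_table([$source="blacklist.file", $name="blacklist",
                          $idx=Idx, $val=Val, $destination=blacklist]);
        # A one-shot read: the stream can be removed right away; the
        # initial read still completes.
        Input::remove("blacklist");
        }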

View file

@@ -6,11 +6,11 @@ module InputAscii;
 export {
 ## Separator between fields.
-## Please note that the separator has to be exactly one character long
+## Please note that the separator has to be exactly one character long.
 const separator = Input::separator &redef;
 ## Separator between set elements.
-## Please note that the separator has to be exactly one character long
+## Please note that the separator has to be exactly one character long.
 const set_separator = Input::set_separator &redef;
 ## String to use for empty fields.

View file

@@ -1,23 +1,23 @@
-##! Interface for the ascii input reader.
+##! Interface for the benchmark input reader.
 module InputBenchmark;
 export {
-## multiplication factor for each second
+## Multiplication factor for each second.
 const factor = 1.0 &redef;
-## spread factor between lines
+## Spread factor between lines.
 const spread = 0 &redef;
-## spreading where usleep = 1000000 / autospread * num_lines
+## Spreading where usleep = 1000000 / autospread * num_lines
 const autospread = 0.0 &redef;
-## addition factor for each heartbeat
+## Addition factor for each heartbeat.
 const addfactor = 0 &redef;
-## stop spreading at x lines per heartbeat
+## Stop spreading at x lines per heartbeat.
 const stopspreadat = 0 &redef;
-## 1 -> enable timed spreading
+## 1 -> enable timed spreading.
 const timedspread = 0.0 &redef;
 }

View file

@@ -4,14 +4,14 @@ module InputRaw;
 export {
 ## Separator between input records.
-## Please note that the separator has to be exactly one character long
+## Please note that the separator has to be exactly one character long.
 const record_separator = "\n" &redef;
 ## Event that is called when a process created by the raw reader exits.
 ##
-## name: name of the input stream
-## source: source of the input stream
-## exit_code: exit code of the program, or number of the signal that forced the program to exit
-## signal_exit: false when program exitted normally, true when program was forced to exit by a signal
+## name: name of the input stream.
+## source: source of the input stream.
+## exit_code: exit code of the program, or number of the signal that forced the program to exit.
+## signal_exit: false when program exited normally, true when program was forced to exit by a signal.
 global process_finished: event(name: string, source:string, exit_code:count, signal_exit:bool);
 }

View file

@@ -0,0 +1,3 @@
+The intelligence framework provides a way to store and query intelligence
+data (such as IP addresses or strings). Metadata can also be associated
+with the intelligence.

View file

@@ -1,5 +1,5 @@
-##! Cluster transparency support for the intelligence framework. This is mostly oriented
-##! toward distributing intelligence information across clusters.
+##! Cluster transparency support for the intelligence framework. This is mostly
+##! oriented toward distributing intelligence information across clusters.
 @load base/frameworks/cluster
 @load ./input

View file

@@ -4,7 +4,7 @@ module Intel;
 export {
 ## Intelligence files that will be read off disk. The files are
-## reread everytime they are updated so updates much be atomic with
+## reread every time they are updated so updates must be atomic with
 ## "mv" instead of writing the file in place.
 const read_files: set[string] = {} &redef;
 }
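
In practice this is simply redef'ed with feed paths (the path here is illustrative)::

    redef Intel::read_files += { "/opt/bro/feeds/my-intel.dat" };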

View file

@@ -1,6 +1,6 @@
 ##! The intelligence framework provides a way to store and query IP addresses,
 ##! and strings (with a str_type). Metadata can
-##! also be associated with the intelligence like for making more informed
+##! also be associated with the intelligence, like for making more informed
 ##! decisions about matching and handling of intelligence.
 @load base/frameworks/notice
@@ -14,7 +14,7 @@ export {
 type Type: enum {
 ## An IP address.
 ADDR,
-## A complete URL without the prefix "http://".
+## A complete URL without the prefix ``"http://"``.
 URL,
 ## Software name.
 SOFTWARE,
@@ -24,21 +24,22 @@ export {
 DOMAIN,
 ## A user name.
 USER_NAME,
-## File hash which is non-hash type specific. It's up to the user to query
-## for any relevant hash types.
+## File hash which is non-hash type specific. It's up to the
+## user to query for any relevant hash types.
 FILE_HASH,
-## File names. Typically with protocols with definite indications
-## of a file name.
+## File name. Typically with protocols with definite
+## indications of a file name.
 FILE_NAME,
 ## Certificate SHA-1 hash.
 CERT_HASH,
 };
-## Data about an :bro:type:`Intel::Item`
+## Data about an :bro:type:`Intel::Item`.
 type MetaData: record {
-## An arbitrary string value representing the data source. Typically,
-## the convention for this field will be the source name and feed name
-## separated by a hyphen. For example: "source1-c&c".
+## An arbitrary string value representing the data source.
+## Typically, the convention for this field will be the source
+## name and feed name separated by a hyphen.
+## For example: "source1-c&c".
 source: string;
 ## A freeform description for the data.
 desc: string &optional;
@@ -81,7 +82,7 @@ export {
 where: Where &log;
 ## If the data was discovered within a connection, the
-## connection record should go into get to give context to the data.
+## connection record should go here to give context to the data.
 conn: connection &optional;
 ## If the data was discovered within a file, the file record
@@ -106,10 +107,12 @@ export {
 ## this is the uid for the file.
 fuid: string &log &optional;
 ## A mime type if the intelligence hit is related to a file.
-## If the $f field is provided this will be automatically filled out.
+## If the $f field is provided this will be automatically filled
+## out.
 file_mime_type: string &log &optional;
 ## Frequently files can be "described" to give a bit more context.
-## If the $f field is provided this field will be automatically filled out.
+## If the $f field is provided this field will be automatically
+## filled out.
 file_desc: string &log &optional;
 ## Where the data was seen.
@@ -125,13 +128,13 @@ export {
 ## it against known intelligence for matches.
 global seen: function(s: Seen);
-## Event to represent a match in the intelligence data from data that was seen.
-## On clusters there is no assurance as to where this event will be generated
-## so do not assume that arbitrary global state beyond the given data
-## will be available.
+## Event to represent a match in the intelligence data from data that
+## was seen. On clusters there is no assurance as to where this event
+## will be generated so do not assume that arbitrary global state beyond
+## the given data will be available.
 ##
-## This is the primary mechanism where a user will take actions based on data
-## within the intelligence framework.
+## This is the primary mechanism where a user will take actions based on
+## data within the intelligence framework.
 global match: event(s: Seen, items: set[Item]);
 global log_intel: event(rec: Info);
@@ -140,7 +143,7 @@ export {
 # Internal handler for matches with no metadata available.
 global match_no_items: event(s: Seen);
-# Internal events for cluster data distribution
+# Internal events for cluster data distribution.
 global new_item: event(item: Item);
 global updated_item: event(item: Item);
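
A sketch of the usual pattern around Intel::seen and Intel::match (the Conn::IN_RESP Where value is an illustrative assumption about the enums protocol scripts define)::

    event connection_established(c: connection)
        {
        Intel::seen([$host=c$id$resp_h, $conn=c, $where=Conn::IN_RESP]);
        }

    event Intel::match(s: Intel::Seen, items: set[Intel::Item])
        {
        for ( item in items )
            print fmt("intel hit from source %s", item$meta$source);
        }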

View file

@@ -0,0 +1 @@
+The logging framework provides a flexible key-value based logging interface.

View file

@@ -1,6 +1,6 @@
 ##! The Bro logging interface.
 ##!
-##! See :doc:`/frameworks/logging` for a introduction to Bro's
+##! See :doc:`/frameworks/logging` for an introduction to Bro's
 ##! logging framework.
 module Log;
@@ -27,7 +27,7 @@ export {
 const set_separator = "," &redef;
 ## String to use for empty fields. This should be different from
-## *unset_field* to make the output non-ambigious.
+## *unset_field* to make the output unambiguous.
 ## Can be overwritten by individual writers.
 const empty_field = "(empty)" &redef;
@@ -41,8 +41,8 @@ export {
 columns: any;
 ## Event that will be raised once for each log entry.
-## The event receives a single same parameter, an instance of type
-## ``columns``.
+## The event receives a single same parameter, an instance of
+## type ``columns``.
 ev: any &optional;
 };
@@ -114,7 +114,7 @@ export {
 ##
 ## The specific interpretation of the string is up to
 ## the used writer, and may for example be the destination
-## file name. Generally, filenames are expected to given
+## file name. Generally, filenames are expected to be given
 ## without any extensions; writers will add appropriate
 ## extensions automatically.
 ##
@@ -126,34 +126,36 @@ export {
 path: string &optional;
 ## A function returning the output path for recording entries
-## matching this filter. This is similar to ``path`` yet allows
+## matching this filter. This is similar to *path* yet allows
 ## to compute the string dynamically. It is ok to return
 ## different strings for separate calls, but be careful: it's
 ## easy to flood the disk by returning a new string for each
-## connection ...
+## connection.
 ##
 ## id: The ID associated with the log stream.
 ##
 ## path: A suggested path value, which may be either the filter's
-## ``path`` if defined, else a previous result from the function.
-## If no ``path`` is defined for the filter, then the first call
-## to the function will contain an empty string.
+## ``path`` if defined, else a previous result from the
+## function. If no ``path`` is defined for the filter,
+## then the first call to the function will contain an
+## empty string.
 ##
 ## rec: An instance of the stream's ``columns`` type with its
 ## fields set to the values to be logged.
 ##
-## Returns: The path to be used for the filter, which will be subject
-## to the same automatic correction rules as the *path*
-## field of :bro:type:`Log::Filter` in the case of conflicts
-## with other filters trying to use the same writer/path pair.
+## Returns: The path to be used for the filter, which will be
+## subject to the same automatic correction rules as
+## the *path* field of :bro:type:`Log::Filter` in the
+## case of conflicts with other filters trying to use
+## the same writer/path pair.
 path_func: function(id: ID, path: string, rec: any): string &optional;
 ## Subset of column names to record. If not given, all
 ## columns are recorded.
 include: set[string] &optional;
-## Subset of column names to exclude from recording. If not given,
-## all columns are recorded.
+## Subset of column names to exclude from recording. If not
+## given, all columns are recorded.
 exclude: set[string] &optional;
 ## If true, entries are recorded locally.
@@ -229,7 +231,7 @@ export {
 ##
 ## filter: A record describing the desired logging parameters.
 ##
-## Returns: True if the filter was sucessfully added, false if
+## Returns: True if the filter was successfully added, false if
 ## the filter was not added or the *filter* argument was not
 ## the correct type.
 ##
@@ -277,7 +279,7 @@ export {
 ##
 ## Returns: True if the stream was found and no error occurred in writing
 ## to it or if the stream was disabled and nothing was written.
-## False if the stream was was not found, or the *columns*
+## False if the stream was not found, or the *columns*
 ## argument did not match what the stream was initially defined
 ## to handle, or one of the stream's filters has an invalid
 ## ``path_func``.
@@ -286,8 +288,8 @@ export {
 global write: function(id: ID, columns: any) : bool;
 ## Sets the buffering status for all the writers of a given logging stream.
-## A given writer implementation may or may not support buffering and if it
-## doesn't then toggling buffering with this function has no effect.
+## A given writer implementation may or may not support buffering and if
+## it doesn't then toggling buffering with this function has no effect.
 ##
 ## id: The ID associated with a logging stream for which to
 ## enable/disable buffering.
@@ -347,7 +349,7 @@ export {
 ##
 ## npath: The new path of the file (after already being rotated/processed
 ## by writer-specific postprocessor as defined in
-## :bro:id:`Log::default_rotation_postprocessors`.
+## :bro:id:`Log::default_rotation_postprocessors`).
 ##
 ## Returns: True when :bro:id:`Log::default_rotation_postprocessor_cmd`
 ## is empty or the system command given by it has been invoked
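
A sketch of a filter using *path_func* to split one stream across two logs (the Conn::LOG stream and its fields follow the stock conn log; the split logic is illustrative)::

    function split_log(id: Log::ID, path: string, rec: Conn::Info): string
        {
        # Route local-originated connections to conn-local.log,
        # everything else to conn-remote.log.
        return Site::is_local_addr(rec$id$orig_h) ? "conn-local" : "conn-remote";
        }

    event bro_init()
        {
        Log::add_filter(Conn::LOG, [$name="conn-split", $path_func=split_log]);
        }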

View file

@@ -16,9 +16,9 @@
 module Log;
 export {
-## Secure-copies the rotated-log to all the remote hosts
+## Secure-copies the rotated log to all the remote hosts
 ## defined in :bro:id:`Log::scp_destinations` and then deletes
-## the local copy of the rotated-log. It's not active when
+## the local copy of the rotated log. It's not active when
 ## reading from trace files.
 ##
 ## info: A record holding meta-information about the log file to be
@@ -42,9 +42,9 @@ export {
 };
 ## A table indexed by a particular log writer and filter path, that yields
-## a set remote destinations. The :bro:id:`Log::scp_postprocessor`
+## a set of remote destinations. The :bro:id:`Log::scp_postprocessor`
 ## function queries this table upon log rotation and performs a secure
-## copy of the rotated-log to each destination in the set. This
+## copy of the rotated log to each destination in the set. This
 ## table can be modified at run-time.
 global scp_destinations: table[Writer, string] of set[SCPDestination];
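
For example, to secure-copy rotated conn logs off-box; a sketch assuming SCPDestination carries *user*, *host*, and *path* fields (the values are illustrative)::

    event bro_init()
        {
        local dest: Log::SCPDestination = [$user="bro",
                                           $host="backup.example.com",
                                           $path="/var/log/bro"];
        Log::scp_destinations[Log::WRITER_ASCII, "conn"] = set(dest);
        }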

View file

@@ -16,9 +16,9 @@
 module Log;
 export {
-## Securely transfers the rotated-log to all the remote hosts
+## Securely transfers the rotated log to all the remote hosts
 ## defined in :bro:id:`Log::sftp_destinations` and then deletes
-## the local copy of the rotated-log. It's not active when
+## the local copy of the rotated log. It's not active when
 ## reading from trace files.
 ##
 ## info: A record holding meta-information about the log file to be
@@ -42,9 +42,9 @@ export {
 };
 ## A table indexed by a particular log writer and filter path, that yields
-## a set remote destinations. The :bro:id:`Log::sftp_postprocessor`
+## a set of remote destinations. The :bro:id:`Log::sftp_postprocessor`
 ## function queries this table upon log rotation and performs a secure
-## transfer of the rotated-log to each destination in the set. This
+## transfer of the rotated log to each destination in the set. This
 ## table can be modified at run-time.
 global sftp_destinations: table[Writer, string] of set[SFTPDestination];

View file

@@ -2,10 +2,10 @@
 ##! to tweak the output format of ASCII logs.
 ##!
 ##! The ASCII writer supports currently one writer-specific filter option via
-##! ``config``: setting ``tsv`` to the string ``T`` turns the output into into
-##! "tab-separated-value" mode where only a single header row with the column names
-##! is printed out as meta information, with no "# fields" prepended; no other meta
-##! data gets included in that mode.
+##! ``config``: setting ``tsv`` to the string ``T`` turns the output into
+##! "tab-separated-value" mode where only a single header row with the column
+##! names is printed out as meta information, with no "# fields" prepended; no
+##! other meta data gets included in that mode.
 ##!
 ##! Example filter using this::
 ##!
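
A filter along these lines might look like the following sketch (the stream, filter name, and placement in bro_init are illustrative)::

    event bro_init()
        {
        local f: Log::Filter = [$name="tsv-conn", $writer=Log::WRITER_ASCII,
                                $config=table(["tsv"] = "T")];
        Log::add_filter(Conn::LOG, f);
        }
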
@@ -19,9 +19,9 @@ export {
 ## into files. This is primarily for debugging purposes.
 const output_to_stdout = F &redef;
-## If true, include lines with log meta information such as column names with
-## types, the values of ASCII logging options that in use, and the time when the
-## file was opened and closes (the latter at the end).
+## If true, include lines with log meta information such as column names
+## with types, the values of ASCII logging options that are in use, and
+## the time when the file was opened and closed (the latter at the end).
 const include_meta = T &redef;
 ## Prefix for lines with meta information.
@@ -34,7 +34,7 @@ export {
 const set_separator = Log::set_separator &redef;
 ## String to use for empty fields. This should be different from
-## *unset_field* to make the output non-ambigious.
+## *unset_field* to make the output unambiguous.
 const empty_field = Log::empty_field &redef;
 ## String to use for an unset &optional field.

View file

@@ -6,16 +6,16 @@ export {
 ## Compression to use with the DS output file. Options are:
 ##
 ## 'none' -- No compression.
-## 'lzf' -- LZF compression. Very quick, but leads to larger output files.
-## 'lzo' -- LZO compression. Very fast decompression times.
-## 'gz' -- GZIP compression. Slower than LZF, but also produces smaller output.
-## 'bz2' -- BZIP2 compression. Slower than GZIP, but also produces smaller output.
+## 'lzf' -- LZF compression (very quick, but leads to larger output files).
+## 'lzo' -- LZO compression (very fast decompression times).
+## 'gz' -- GZIP compression (slower than LZF, but also produces smaller output).
+## 'bz2' -- BZIP2 compression (slower than GZIP, but also produces smaller output).
 const compression = "gz" &redef;
 ## The extent buffer size.
-## Larger values here lead to better compression and more efficient writes, but
-## also increase the lag between the time events are received and the time they
-## are actually written to disk.
+## Larger values here lead to better compression and more efficient writes,
+## but also increase the lag between the time events are received and
+## the time they are actually written to disk.
 const extent_size = 65536 &redef;
 ## Should we dump the XML schema we use for this DS file to disk?
@@ -43,8 +43,8 @@ export {
 }
 # Default function to postprocess a rotated DataSeries log file. It moves the
-# rotated file to a new name that includes a timestamp with the opening time, and
-# then runs the writer's default postprocessor command on it.
+# rotated file to a new name that includes a timestamp with the opening time,
+# and then runs the writer's default postprocessor command on it.
 function default_rotation_postprocessor_func(info: Log::RotationInfo) : bool
 {
 # Move file to name including both opening and closing time.

View file

@@ -10,16 +10,16 @@
 module LogElasticSearch;
 export {
-## Name of the ES cluster
+## Name of the ES cluster.
 const cluster_name = "elasticsearch" &redef;
-## ES Server
+## ES server.
 const server_host = "127.0.0.1" &redef;
-## ES Port
+## ES port.
 const server_port = 9200 &redef;
-## Name of the ES index
+## Name of the ES index.
 const index_prefix = "bro" &redef;
 ## The ES type prefix comes before the name of the related log.
@@ -27,9 +27,9 @@ export {
 const type_prefix = "" &redef;
 ## The time before an ElasticSearch transfer will timeout. Note that
-## the fractional part of the timeout will be ignored. In particular, time
-## specifications less than a second result in a timeout value of 0, which
-## means "no timeout."
+## the fractional part of the timeout will be ignored. In particular,
+## time specifications less than a second result in a timeout value of
+## 0, which means "no timeout."
 const transfer_timeout = 2secs;
 ## The batch size is the number of messages that will be queued up before

View file

@@ -1,4 +1,4 @@
-##! Interface for the None log writer. Thiis writer is mainly for debugging.
+##! Interface for the None log writer. This writer is mainly for debugging.
 module LogNone;

View file

@@ -11,7 +11,7 @@ export {
 const unset_field = Log::unset_field &redef;
 ## String to use for empty fields. This should be different from
-## *unset_field* to make the output non-ambigious.
+## *unset_field* to make the output unambiguous.
 const empty_field = Log::empty_field &redef;
 }

View file

@@ -0,0 +1 @@
+The packet filter framework supports how Bro sets its BPF capture filter.

View file

@@ -1,4 +1,4 @@
-##! This script supports how Bro sets it's BPF capture filter. By default
+##! This script supports how Bro sets its BPF capture filter. By default
 ##! Bro sets a capture filter that allows all traffic. If a filter
 ##! is set on the command line, that filter takes precedence over the default
 ##! open filter and all filters defined in Bro scripts with the
@@ -19,7 +19,7 @@ export {
 ## This notice is generated if a packet filter cannot be compiled.
 Compile_Failure,
-## Generated if a packet filter is fails to install.
+## Generated if a packet filter fails to install.
 Install_Failure,
 ## Generated when a notice takes too long to compile.
@@ -33,8 +33,8 @@ export {
 ts: time &log;
 ## This is a string representation of the node that applied this
-## packet filter. It's mostly useful in the context of dynamically
-## changing filters on clusters.
+## packet filter. It's mostly useful in the context of
+## dynamically changing filters on clusters.
 node: string &log &optional;
 ## The packet filter that is being set.
@@ -48,27 +48,28 @@ export {
 };
 ## The BPF filter that is used by default to define what traffic should
-## be captured. Filters defined in :bro:id:`restrict_filters` will still
-## be applied to reduce the captured traffic.
+## be captured. Filters defined in :bro:id:`restrict_filters` will
+## still be applied to reduce the captured traffic.
 const default_capture_filter = "ip or not ip" &redef;
-## Filter string which is unconditionally or'ed to the beginning of every
-## dynamically built filter.
+## Filter string which is unconditionally or'ed to the beginning of
+## every dynamically built filter.
 const unrestricted_filter = "" &redef;
-## Filter string which is unconditionally and'ed to the beginning of every
-## dynamically built filter. This is mostly used when a custom filter is being
-## used but MPLS or VLAN tags are on the traffic.
+## Filter string which is unconditionally and'ed to the beginning of
+## every dynamically built filter. This is mostly used when a custom
+## filter is being used but MPLS or VLAN tags are on the traffic.
 const restricted_filter = "" &redef;
 ## The maximum amount of time that you'd like to allow for BPF filters to compile.
 ## If this time is exceeded, compensation measures may be taken by the framework
-## to reduce the filter size. This threshold being crossed also results in
-## the :bro:see:`PacketFilter::Too_Long_To_Compile_Filter` notice.
+## to reduce the filter size. This threshold being crossed also results
+## in the :bro:see:`PacketFilter::Too_Long_To_Compile_Filter` notice.
 const max_filter_compile_time = 100msec &redef;
-## Install a BPF filter to exclude some traffic. The filter should positively
-## match what is to be excluded, it will be wrapped in a "not".
+## Install a BPF filter to exclude some traffic. The filter should
+## positively match what is to be excluded, it will be wrapped in
+## a "not".
 ##
 ## filter_id: An arbitrary string that can be used to identify
 ## the filter.
@@ -79,9 +80,9 @@ export {
 ## installed or not.
 global exclude: function(filter_id: string, filter: string): bool;
-## Install a temporary filter to traffic which should not be passed through
-## the BPF filter. The filter should match the traffic you don't want
-## to see (it will be wrapped in a "not" condition).
+## Install a temporary filter to traffic which should not be passed
+## through the BPF filter. The filter should match the traffic you
+## don't want to see (it will be wrapped in a "not" condition).
 ##
 ## filter_id: An arbitrary string that can be used to identify
 ## the filter.
@@ -125,7 +126,7 @@ global dynamic_restrict_filters: table[string] of string = {};
 # install the filter.
 global currently_building = F;
-# Internal tracking for if the the filter being built has possibly been changed.
+# Internal tracking for if the filter being built has possibly been changed.
 global filter_changed = F;
 global filter_plugins: set[FilterPlugin] = {};
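
A sketch of excluding traffic with this interface (the BPF expression and filter id are illustrative)::

    event bro_init()
        {
        # Never capture or analyze IMAPS traffic.
        PacketFilter::exclude("ignore-imaps", "tcp port 993");
        }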

View file

@@ -13,7 +13,7 @@ export {
 ##
 ## num_parts: The number of parts the traffic should be split into.
 ##
-## this_part: The part of the traffic this filter will accept. 0-based.
+## this_part: The part of the traffic this filter will accept (0-based).
 global sampling_filter: function(num_parts: count, this_part: count): string;
 ## Combines two valid BPF filter strings with a string based operator

View file

@@ -7,7 +7,7 @@
 ##! :bro:see:`Reporter::errors_to_stderr`.
 ##!
 ##! Note that this framework deals with the handling of internally generated
-##! reporter messages, for the interface in to actually creating interface
+##! reporter messages, for the interface
 ##! into actually creating reporter messages from the scripting layer, use
 ##! the built-in functions in :doc:`/scripts/base/bif/reporter.bif`.

View file

@@ -0,0 +1,4 @@
+The software framework provides infrastructure for maintaining a table
+of software versions seen on the network. The version parsing itself
+is carried out by external protocol-specific scripts that feed into
+this framework.

View file

@@ -1,5 +1,5 @@
 ##! This script provides the framework for software version detection and
-##! parsing but doesn't actually do any detection on it's own. It relys on
+##! parsing but doesn't actually do any detection on its own. It relies on
 ##! other protocol specific scripts to parse out software from the protocols
 ##! that they analyze. The entry point for providing new software detections
 ##! to this framework is through the :bro:id:`Software::found` function.
@@ -23,15 +23,15 @@ export {
 ## A structure to represent the numeric version of software.
 type Version: record {
-## Major version number
+## Major version number.
 major: count &optional;
-## Minor version number
+## Minor version number.
 minor: count &optional;
-## Minor subversion number
+## Minor subversion number.
 minor2: count &optional;
-## Minor updates number
+## Minor updates number.
 minor3: count &optional;
-## Additional version string (e.g. "beta42")
+## Additional version string (e.g. "beta42").
 addl: string &optional;
 } &log;
@@ -41,7 +41,8 @@ export {
 ts: time &log &optional;
 ## The IP address detected running the software.
 host: addr &log;
-## The Port on which the software is running. Only sensible for server software.
+## The port on which the software is running. Only sensible for
+## server software.
 host_p: port &log &optional;
 ## The type of software detected (e.g. :bro:enum:`HTTP::SERVER`).
 software_type: Type &log &default=UNKNOWN;
@@ -49,9 +50,9 @@ export {
 name: string &log &optional;
 ## Version of the software.
 version: Version &log &optional;
-## The full unparsed version string found because the version parsing
-## doesn't always work reliably in all cases and this acts as a
-## fallback in the logs.
+## The full unparsed version string found because the version
+## parsing doesn't always work reliably in all cases and this
+## acts as a fallback in the logs.
 unparsed_version: string &log &optional;
 ## This can indicate that this software being detected should
@@ -59,13 +60,13 @@ export {
 ## default, only software that is "interesting" due to a change
 ## in version or it being currently unknown is sent to the
 ## logging framework. This can be set to T to force the record
-## to be sent to the logging framework if some amount of this tracking
-## needs to happen in a specific way to the software.
+## to be sent to the logging framework if some amount of this
+## tracking needs to happen in a specific way to the software.
 force_log: bool &default=F;
 };
 ## Hosts whose software should be detected and tracked.
-## Choices are: LOCAL_HOSTS, REMOTE_HOSTS, ALL_HOSTS, NO_HOSTS
+## Choices are: LOCAL_HOSTS, REMOTE_HOSTS, ALL_HOSTS, NO_HOSTS.
 const asset_tracking = LOCAL_HOSTS &redef;
 ## Other scripts should call this function when they detect software.
@@ -79,14 +80,14 @@ export {
 ## Compare two version records.
 ##
 ## Returns: -1 for v1 < v2, 0 for v1 == v2, 1 for v1 > v2.
-## If the numerical version numbers match, the addl string
+## If the numerical version numbers match, the *addl* string
 ## is compared lexicographically.
 global cmp_versions: function(v1: Version, v2: Version): int;
 ## Type to represent a collection of :bro:type:`Software::Info` records.
 ## It's indexed with the name of a piece of software such as "Firefox"
-## and it yields a :bro:type:`Software::Info` record with more information
-## about the software.
+## and it yields a :bro:type:`Software::Info` record with more
+## information about the software.
 type SoftwareSet: table[string] of Info;
 ## The set of software associated with an address. Data expires from
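
Protocol scripts feed this framework through Software::found; a sketch assuming a found(id: conn_id, info: Info) signature and using the HTTP::SERVER type named above::

    event http_header(c: connection, is_orig: bool, name: string, value: string)
        {
        if ( ! is_orig && name == "SERVER" )
            Software::found(c$id, [$unparsed_version=value,
                                   $host=c$id$resp_h,
                                   $software_type=HTTP::SERVER]);
        }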

View file

@@ -0,0 +1,2 @@
+The summary statistics framework provides a way to summarize large streams
+of data into simple reduced measurements.

View file

@@ -1,6 +1,6 @@
 ##! This implements transparent cluster support for the SumStats framework.
 ##! Do not load this file directly. It's only meant to be loaded automatically
-##! and will be depending on if the cluster framework has been enabled.
+##! and will be if the cluster framework has been enabled.
 ##! The goal of this script is to make sumstats calculation completely and
 ##! transparently automated when running on a cluster.
@@ -10,31 +10,32 @@
 module SumStats;
 export {
-## The percent of the full threshold value that needs to be met on a single worker
-## for that worker to send the value to its manager in order for it to request a
-## global view for that value. There is no requirement that the manager requests
-## a global view for the key since it may opt not to if it requested a global view
-## for the key recently.
+## The percent of the full threshold value that needs to be met on a
+## single worker for that worker to send the value to its manager in
+## order for it to request a global view for that value. There is no
+## requirement that the manager requests a global view for the key since
+## it may opt not to if it requested a global view for the key recently.
 const cluster_request_global_view_percent = 0.2 &redef;
-## This is to deal with intermediate update overload. A manager will only allow
-## this many intermediate update requests to the workers to be inflight at any
-## given time. Requested intermediate updates are currently thrown out and not
-## performed. In practice this should hopefully have a minimal effect.
+## This is to deal with intermediate update overload. A manager will
+## only allow this many intermediate update requests to the workers to
+## be inflight at any given time. Requested intermediate updates are
+## currently thrown out and not performed. In practice this should
+## hopefully have a minimal effect.
 const max_outstanding_global_views = 10 &redef;
-## Event sent by the manager in a cluster to initiate the collection of values for
-## a sumstat.
+## Event sent by the manager in a cluster to initiate the collection of
+## values for a sumstat.
 global cluster_ss_request: event(uid: string, ss_name: string, cleanup: bool);
-## Event sent by nodes that are collecting sumstats after receiving a request for
-## the sumstat from the manager.
+## Event sent by nodes that are collecting sumstats after receiving a
+## request for the sumstat from the manager.
 #global cluster_ss_response: event(uid: string, ss_name: string, data: ResultTable, done: bool, cleanup: bool);
-## This event is sent by the manager in a cluster to initiate the collection of
-## a single key value from a sumstat. It's typically used to get intermediate
-## updates before the break interval triggers to speed detection of a value
-## crossing a threshold.
+## This event is sent by the manager in a cluster to initiate the
+## collection of a single key value from a sumstat. It's typically used
+## to get intermediate updates before the break interval triggers to
+## speed detection of a value crossing a threshold.
 global cluster_get_result: event(uid: string, ss_name: string, key: Key, cleanup: bool);
 ## This event is sent by nodes in response to a
@@ -43,7 +44,7 @@ export {
 ## This is sent by workers to indicate that they crossed the percent
 ## of the current threshold by the percentage defined globally in
-## :bro:id:`SumStats::cluster_request_global_view_percent`
+## :bro:id:`SumStats::cluster_request_global_view_percent`.
 global cluster_key_intermediate_response: event(ss_name: string, key: SumStats::Key);
 ## This event is scheduled internally on workers to send result chunks.

View file

@@ -51,8 +51,8 @@ export {
 ## would like to accept the data being inserted.
 pred: function(key: SumStats::Key, obs: SumStats::Observation): bool &optional;
-## A function to normalize the key. This can be used to aggregate or
-## normalize the entire key.
+## A function to normalize the key. This can be used to
+## aggregate or normalize the entire key.
 normalize_key: function(key: SumStats::Key): Key &optional;
 };
@@ -91,28 +91,28 @@ export {
 name: string;
 ## The interval at which this filter should be "broken"
-## and the '$epoch_result' callback called. The
+## and the *epoch_result* callback called. The
 ## results are also reset at this time so any threshold
 ## based detection needs to be set to a
 ## value that should be expected to happen within
 ## this epoch.
 epoch: interval;
-## The reducers for the SumStat
+## The reducers for the SumStat.
 reducers: set[Reducer];
 ## Provide a function to calculate a value from the
 ## :bro:see:`SumStats::Result` structure which will be used
 ## for thresholding.
-## This is required if a $threshold value is given.
+## This is required if a *threshold* value is given.
 threshold_val: function(key: SumStats::Key, result: SumStats::Result): double &optional;
 ## The threshold value for calling the
-## $threshold_crossed callback.
+## *threshold_crossed* callback.
 threshold: double &optional;
 ## A series of thresholds for calling the
-## $threshold_crossed callback.
+## *threshold_crossed* callback.
 threshold_series: vector of double &optional;
 ## A callback that is called when a threshold is crossed.
@@ -124,7 +124,7 @@ export {
 epoch_result: function(ts: time, key: SumStats::Key, result: SumStats::Result) &optional;
 ## A callback that will be called when a single collection
-## interval is completed. The ts value will be the time of
+## interval is completed. The *ts* value will be the time of
 ## when the collection started.
 epoch_finished: function(ts:time) &optional;
 };
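
The fields above combine roughly as in this sketch (the stream name, key choice, and printed output are illustrative)::

    event connection_established(c: connection)
        {
        SumStats::observe("conn established",
                          SumStats::Key($host=c$id$orig_h),
                          SumStats::Observation($num=1));
        }

    event bro_init()
        {
        local r1 = SumStats::Reducer($stream="conn established",
                                     $apply=set(SumStats::SUM));
        SumStats::create([$name="counting connections",
                          $epoch=1min,
                          $reducers=set(r1),
                          $epoch_result(ts: time, key: SumStats::Key, result: SumStats::Result) =
                              {
                              print fmt("%s established %.0f connections",
                                        key$host, result["conn established"]$sum);
                              }]);
        }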

View file

@@ -5,12 +5,12 @@ module SumStats;
 export {
 redef enum Calculation += {
-## Keep last X observations in a queue
+## Keep last X observations in a queue.
 LAST
 };
 redef record Reducer += {
-## number of elements to keep.
+## Number of elements to keep.
 num_last_elements: count &default=0;
 };

View file

@@ -4,7 +4,8 @@ module SumStats;
 export {
 redef enum Calculation += {
-## Get uniquely distributed random samples from the observation stream.
+## Get uniquely distributed random samples from the observation
+## stream.
 SAMPLE
 };
@@ -24,8 +25,8 @@ export {
 redef record ResultVal += {
 # Internal use only. This is not meant to be publically available
-# and just a copy of num_samples from the Reducer. Needed for availability
-# in the compose hook.
+# and just a copy of num_samples from the Reducer. Needed for
+# availability in the compose hook.
 num_samples: count &default=0;
 };

View file

@@ -4,7 +4,7 @@ module SumStats;
 export {
 redef record Reducer += {
-## number of elements to keep in the top-k list
+## Number of elements to keep in the top-k list.
 topk_size: count &default=500;
 };

View file

@@ -28,7 +28,7 @@ redef record ResultVal += {
 # Internal use only. This is not meant to be publically available
 # because we don't want to trust that we can inspect the values
-# since we will like move to a probalistic data structure in the future.
+# since we will likely move to a probabilistic data structure in the future.
 # TODO: in the future this will optionally be a hyperloglog structure
 unique_vals: set[Observation] &optional;
 };

View file

@@ -0,0 +1,2 @@
+The tunnels framework handles the tracking/logging of tunnels (e.g. Teredo,
+AYIYA, or IP-in-IP such as 6to4 where "IP" is either IPv4 or IPv6).

View file

@@ -29,8 +29,8 @@ export {
 ## The unique identifier for the tunnel, which may correspond
 ## to a :bro:type:`connection`'s *uid* field for non-IP-in-IP tunnels.
 ## This is optional because there could be numerous connections
-## for payload proxies like SOCKS but we should treat it as a single
-## tunnel.
+## for payload proxies like SOCKS but we should treat it as a
+## single tunnel.
 uid: string &log &optional;
 ## The tunnel "connection" 4-tuple of endpoint addresses/ports.
 ## For an IP tunnel, the ports will be 0.
@@ -76,8 +76,8 @@ export {
 ## connections before it is considered inactive/expired.
 const expiration_interval = 1hrs &redef;
-## Currently active tunnels. That is, tunnels for which new, encapsulated
-## connections have been seen in the interval indicated by
+## Currently active tunnels. That is, tunnels for which new,
+## encapsulated connections have been seen in the interval indicated by
 ## :bro:see:`Tunnel::expiration_interval`.
 global active: table[conn_id] of Info = table() &read_expire=expiration_interval &expire_func=expire;
 }