mirror of
https://github.com/zeek/zeek.git
synced 2025-10-10 10:38:20 +00:00
Merge remote-tracking branch 'origin/topic/dnthayer/doc-fixes'
BIT-1484 #merged * origin/topic/dnthayer/doc-fixes: Fix documentation of encode/decode_base64 BiFs Update some doc tests and baselines Update and improve install instructions More improvements to input framework documentation Update install instructions for CAF Improve documentation of input framework Fixed some examples in "Writing Bro Scripts" doc Clarifications to the script reference docs Split long lines in input framework docs Update documentation of Conn::Info history field Minor clarifications and typo fixes in broker doc Remove unnecessary blank lines from some broker doc files Fix some doc build warnings Improve documentation of table and set types Fix typo in documentation of a field in connection record Significant improvements to the GeoLocation doc
This commit is contained in:
commit
5cd99a7f4b
38 changed files with 414 additions and 429 deletions
1
doc/components/bro-plugins/pf_ring/README.rst
Symbolic link
1
doc/components/bro-plugins/pf_ring/README.rst
Symbolic link
|
@ -0,0 +1 @@
|
||||||
|
../../../../aux/plugins/pf_ring/README
|
1
doc/components/bro-plugins/redis/README.rst
Symbolic link
1
doc/components/bro-plugins/redis/README.rst
Symbolic link
|
@ -0,0 +1 @@
|
||||||
|
../../../../aux/plugins/redis/README
|
|
@ -286,9 +286,9 @@ Activating a plugin will:
|
||||||
1. Load the dynamic module
|
1. Load the dynamic module
|
||||||
2. Make any bif items available
|
2. Make any bif items available
|
||||||
3. Add the ``scripts/`` directory to ``BROPATH``
|
3. Add the ``scripts/`` directory to ``BROPATH``
|
||||||
5. Load ``scripts/__preload__.bro``
|
4. Load ``scripts/__preload__.bro``
|
||||||
6. Make BiF elements available to scripts.
|
5. Make BiF elements available to scripts.
|
||||||
7. Load ``scripts/__load__.bro``
|
6. Load ``scripts/__load__.bro``
|
||||||
|
|
||||||
By default, Bro will automatically activate all dynamic plugins found
|
By default, Bro will automatically activate all dynamic plugins found
|
||||||
in its search path ``BRO_PLUGIN_PATH``. However, in bare mode (``bro
|
in its search path ``BRO_PLUGIN_PATH``. However, in bare mode (``bro
|
||||||
|
|
|
@ -9,10 +9,7 @@ Broker-Enabled Communication Framework
|
||||||
|
|
||||||
Bro can now use the `Broker Library
|
Bro can now use the `Broker Library
|
||||||
<../components/broker/README.html>`_ to exchange information with
|
<../components/broker/README.html>`_ to exchange information with
|
||||||
other Bro processes. To enable it run Bro's ``configure`` script
|
other Bro processes.
|
||||||
with the ``--enable-broker`` option. Note that a C++11 compatible
|
|
||||||
compiler (e.g. GCC 4.8+ or Clang 3.3+) is required as well as the
|
|
||||||
`C++ Actor Framework <http://actor-framework.org/>`_.
|
|
||||||
|
|
||||||
.. contents::
|
.. contents::
|
||||||
|
|
||||||
|
@ -23,26 +20,26 @@ Communication via Broker must first be turned on via
|
||||||
:bro:see:`BrokerComm::enable`.
|
:bro:see:`BrokerComm::enable`.
|
||||||
|
|
||||||
Bro can accept incoming connections by calling :bro:see:`BrokerComm::listen`
|
Bro can accept incoming connections by calling :bro:see:`BrokerComm::listen`
|
||||||
and then monitor connection status updates via
|
and then monitor connection status updates via the
|
||||||
:bro:see:`BrokerComm::incoming_connection_established` and
|
:bro:see:`BrokerComm::incoming_connection_established` and
|
||||||
:bro:see:`BrokerComm::incoming_connection_broken`.
|
:bro:see:`BrokerComm::incoming_connection_broken` events.
|
||||||
|
|
||||||
.. btest-include:: ${DOC_ROOT}/frameworks/broker/connecting-listener.bro
|
.. btest-include:: ${DOC_ROOT}/frameworks/broker/connecting-listener.bro
|
||||||
|
|
||||||
Bro can initiate outgoing connections by calling :bro:see:`BrokerComm::connect`
|
Bro can initiate outgoing connections by calling :bro:see:`BrokerComm::connect`
|
||||||
and then monitor connection status updates via
|
and then monitor connection status updates via the
|
||||||
:bro:see:`BrokerComm::outgoing_connection_established`,
|
:bro:see:`BrokerComm::outgoing_connection_established`,
|
||||||
:bro:see:`BrokerComm::outgoing_connection_broken`, and
|
:bro:see:`BrokerComm::outgoing_connection_broken`, and
|
||||||
:bro:see:`BrokerComm::outgoing_connection_incompatible`.
|
:bro:see:`BrokerComm::outgoing_connection_incompatible` events.
|
||||||
|
|
||||||
.. btest-include:: ${DOC_ROOT}/frameworks/broker/connecting-connector.bro
|
.. btest-include:: ${DOC_ROOT}/frameworks/broker/connecting-connector.bro
|
||||||
|
|
||||||
Remote Printing
|
Remote Printing
|
||||||
===============
|
===============
|
||||||
|
|
||||||
To receive remote print messages, first use
|
To receive remote print messages, first use the
|
||||||
:bro:see:`BrokerComm::subscribe_to_prints` to advertise to peers a topic
|
:bro:see:`BrokerComm::subscribe_to_prints` function to advertise to peers a
|
||||||
prefix of interest and then create an event handler for
|
topic prefix of interest and then create an event handler for
|
||||||
:bro:see:`BrokerComm::print_handler` to handle any print messages that are
|
:bro:see:`BrokerComm::print_handler` to handle any print messages that are
|
||||||
received.
|
received.
|
||||||
|
|
||||||
|
@ -71,17 +68,17 @@ the Broker message format is simply:
|
||||||
Remote Events
|
Remote Events
|
||||||
=============
|
=============
|
||||||
|
|
||||||
Receiving remote events is similar to remote prints. Just use
|
Receiving remote events is similar to remote prints. Just use the
|
||||||
:bro:see:`BrokerComm::subscribe_to_events` and possibly define any new events
|
:bro:see:`BrokerComm::subscribe_to_events` function and possibly define any
|
||||||
along with handlers that peers may want to send.
|
new events along with handlers that peers may want to send.
|
||||||
|
|
||||||
.. btest-include:: ${DOC_ROOT}/frameworks/broker/events-listener.bro
|
.. btest-include:: ${DOC_ROOT}/frameworks/broker/events-listener.bro
|
||||||
|
|
||||||
To send events, there are two choices. The first is to use call
|
There are two different ways to send events. The first is to call the
|
||||||
:bro:see:`BrokerComm::event` directly. The second option is to use
|
:bro:see:`BrokerComm::event` function directly. The second option is to call
|
||||||
:bro:see:`BrokerComm::auto_event` to make it so a particular event is
|
the :bro:see:`BrokerComm::auto_event` function where you specify a
|
||||||
automatically sent to peers whenever it is called locally via the normal
|
particular event that will be automatically sent to peers whenever the
|
||||||
event invocation syntax.
|
event is called locally via the normal event invocation syntax.
|
||||||
|
|
||||||
.. btest-include:: ${DOC_ROOT}/frameworks/broker/events-connector.bro
|
.. btest-include:: ${DOC_ROOT}/frameworks/broker/events-connector.bro
|
||||||
|
|
||||||
|
@ -98,7 +95,7 @@ the Broker message format is:
|
||||||
broker::message{std::string{}, ...};
|
broker::message{std::string{}, ...};
|
||||||
|
|
||||||
The first parameter is the name of the event and the remaining ``...``
|
The first parameter is the name of the event and the remaining ``...``
|
||||||
are its arguments, which are any of the support Broker data types as
|
are its arguments, which are any of the supported Broker data types as
|
||||||
they correspond to the Bro types for the event named in the first
|
they correspond to the Bro types for the event named in the first
|
||||||
parameter of the message.
|
parameter of the message.
|
||||||
|
|
||||||
|
@ -107,23 +104,23 @@ Remote Logging
|
||||||
|
|
||||||
.. btest-include:: ${DOC_ROOT}/frameworks/broker/testlog.bro
|
.. btest-include:: ${DOC_ROOT}/frameworks/broker/testlog.bro
|
||||||
|
|
||||||
Use :bro:see:`BrokerComm::subscribe_to_logs` to advertise interest in logs
|
Use the :bro:see:`BrokerComm::subscribe_to_logs` function to advertise interest
|
||||||
written by peers. The topic names that Bro uses are implicitly of the
|
in logs written by peers. The topic names that Bro uses are implicitly of the
|
||||||
form "bro/log/<stream-name>".
|
form "bro/log/<stream-name>".
|
||||||
|
|
||||||
.. btest-include:: ${DOC_ROOT}/frameworks/broker/logs-listener.bro
|
.. btest-include:: ${DOC_ROOT}/frameworks/broker/logs-listener.bro
|
||||||
|
|
||||||
To send remote logs either use :bro:see:`Log::enable_remote_logging` or
|
To send remote logs either redef :bro:see:`Log::enable_remote_logging` or
|
||||||
:bro:see:`BrokerComm::enable_remote_logs`. The former allows any log stream
|
use the :bro:see:`BrokerComm::enable_remote_logs` function. The former
|
||||||
to be sent to peers while the later toggles remote logging for
|
allows any log stream to be sent to peers while the latter enables remote
|
||||||
particular streams.
|
logging for particular streams.
|
||||||
|
|
||||||
.. btest-include:: ${DOC_ROOT}/frameworks/broker/logs-connector.bro
|
.. btest-include:: ${DOC_ROOT}/frameworks/broker/logs-connector.bro
|
||||||
|
|
||||||
Message Format
|
Message Format
|
||||||
--------------
|
--------------
|
||||||
|
|
||||||
For other applications that want to exchange logs messages with Bro,
|
For other applications that want to exchange log messages with Bro,
|
||||||
the Broker message format is:
|
the Broker message format is:
|
||||||
|
|
||||||
.. code:: c++
|
.. code:: c++
|
||||||
|
@ -132,7 +129,7 @@ the Broker message format is:
|
||||||
|
|
||||||
The enum value corresponds to the stream's :bro:see:`Log::ID` value, and
|
The enum value corresponds to the stream's :bro:see:`Log::ID` value, and
|
||||||
the record corresponds to a single entry of that log's columns record,
|
the record corresponds to a single entry of that log's columns record,
|
||||||
in this case a ``Test::INFO`` value.
|
in this case a ``Test::Info`` value.
|
||||||
|
|
||||||
Tuning Access Control
|
Tuning Access Control
|
||||||
=====================
|
=====================
|
||||||
|
@ -152,10 +149,11 @@ that take a :bro:see:`BrokerComm::SendFlags` such as :bro:see:`BrokerComm::print
|
||||||
:bro:see:`BrokerComm::enable_remote_logs`.
|
:bro:see:`BrokerComm::enable_remote_logs`.
|
||||||
|
|
||||||
If not using the ``auto_advertise`` flag, one can use the
|
If not using the ``auto_advertise`` flag, one can use the
|
||||||
:bro:see:`BrokerComm::advertise_topic` and :bro:see:`BrokerComm::unadvertise_topic`
|
:bro:see:`BrokerComm::advertise_topic` and
|
||||||
to manupulate the set of topic prefixes that are allowed to be
|
:bro:see:`BrokerComm::unadvertise_topic` functions
|
||||||
advertised to peers. If an endpoint does not advertise a topic prefix,
|
to manipulate the set of topic prefixes that are allowed to be
|
||||||
the only way a peers can send messages to it is via the ``unsolicited``
|
advertised to peers. If an endpoint does not advertise a topic prefix, then
|
||||||
|
the only way peers can send messages to it is via the ``unsolicited``
|
||||||
flag of :bro:see:`BrokerComm::SendFlags` and choosing a topic with a matching
|
flag of :bro:see:`BrokerComm::SendFlags` and choosing a topic with a matching
|
||||||
prefix (i.e. full topic may be longer than receivers prefix, just the
|
prefix (i.e. full topic may be longer than receivers prefix, just the
|
||||||
prefix needs to match).
|
prefix needs to match).
|
||||||
|
@ -172,7 +170,7 @@ specific type of frontend, but a standalone frontend can also exist to
|
||||||
e.g. query and modify the contents of a remote master store without
|
e.g. query and modify the contents of a remote master store without
|
||||||
actually "owning" any of the contents itself.
|
actually "owning" any of the contents itself.
|
||||||
|
|
||||||
A master data store can be be cloned from remote peers which may then
|
A master data store can be cloned from remote peers which may then
|
||||||
perform lightweight, local queries against the clone, which
|
perform lightweight, local queries against the clone, which
|
||||||
automatically stays synchronized with the master store. Clones cannot
|
automatically stays synchronized with the master store. Clones cannot
|
||||||
modify their content directly, instead they send modifications to the
|
modify their content directly, instead they send modifications to the
|
||||||
|
@ -181,7 +179,7 @@ all clones.
|
||||||
|
|
||||||
Master and clone stores get to choose what type of storage backend to
|
Master and clone stores get to choose what type of storage backend to
|
||||||
use. E.g. In-memory versus SQLite for persistence. Note that if clones
|
use. E.g. In-memory versus SQLite for persistence. Note that if clones
|
||||||
are used, data store sizes should still be able to fit within memory
|
are used, then data store sizes must be able to fit within memory
|
||||||
regardless of the storage backend as a single snapshot of the master
|
regardless of the storage backend as a single snapshot of the master
|
||||||
store is sent in a single chunk to initialize the clone.
|
store is sent in a single chunk to initialize the clone.
|
||||||
|
|
||||||
|
@ -198,5 +196,5 @@ needed, just replace the :bro:see:`BrokerStore::create_clone` call with
|
||||||
:bro:see:`BrokerStore::create_frontend`. Queries will then be made against
|
:bro:see:`BrokerStore::create_frontend`. Queries will then be made against
|
||||||
the remote master store instead of the local clone.
|
the remote master store instead of the local clone.
|
||||||
|
|
||||||
Note that all queries are made within Bro's asynchrounous ``when``
|
Note that all data store queries must be made within Bro's asynchronous
|
||||||
statements and must specify a timeout block.
|
``when`` statements and must specify a timeout block.
|
||||||
|
|
|
@ -1,4 +1,3 @@
|
||||||
|
|
||||||
const broker_port: port = 9999/tcp &redef;
|
const broker_port: port = 9999/tcp &redef;
|
||||||
redef exit_only_after_terminate = T;
|
redef exit_only_after_terminate = T;
|
||||||
redef BrokerComm::endpoint_name = "connector";
|
redef BrokerComm::endpoint_name = "connector";
|
||||||
|
|
|
@ -1,4 +1,3 @@
|
||||||
|
|
||||||
const broker_port: port = 9999/tcp &redef;
|
const broker_port: port = 9999/tcp &redef;
|
||||||
redef exit_only_after_terminate = T;
|
redef exit_only_after_terminate = T;
|
||||||
redef BrokerComm::endpoint_name = "listener";
|
redef BrokerComm::endpoint_name = "listener";
|
||||||
|
|
|
@ -1,4 +1,3 @@
|
||||||
|
|
||||||
const broker_port: port = 9999/tcp &redef;
|
const broker_port: port = 9999/tcp &redef;
|
||||||
redef exit_only_after_terminate = T;
|
redef exit_only_after_terminate = T;
|
||||||
redef BrokerComm::endpoint_name = "listener";
|
redef BrokerComm::endpoint_name = "listener";
|
||||||
|
|
|
@ -1,4 +1,3 @@
|
||||||
|
|
||||||
const broker_port: port = 9999/tcp &redef;
|
const broker_port: port = 9999/tcp &redef;
|
||||||
redef exit_only_after_terminate = T;
|
redef exit_only_after_terminate = T;
|
||||||
redef BrokerComm::endpoint_name = "listener";
|
redef BrokerComm::endpoint_name = "listener";
|
||||||
|
|
|
@ -1,4 +1,3 @@
|
||||||
|
|
||||||
module Test;
|
module Test;
|
||||||
|
|
||||||
export {
|
export {
|
||||||
|
|
|
@ -20,11 +20,13 @@ GeoLocation
|
||||||
Install libGeoIP
|
Install libGeoIP
|
||||||
----------------
|
----------------
|
||||||
|
|
||||||
|
Before building Bro, you need to install libGeoIP.
|
||||||
|
|
||||||
* FreeBSD:
|
* FreeBSD:
|
||||||
|
|
||||||
.. console::
|
.. console::
|
||||||
|
|
||||||
sudo pkg_add -r GeoIP
|
sudo pkg install GeoIP
|
||||||
|
|
||||||
* RPM/RedHat-based Linux:
|
* RPM/RedHat-based Linux:
|
||||||
|
|
||||||
|
@ -40,80 +42,99 @@ Install libGeoIP
|
||||||
|
|
||||||
* Mac OS X:
|
* Mac OS X:
|
||||||
|
|
||||||
Vanilla OS X installations don't ship with libGeoIP, but if
|
You need to install from your preferred package management system
|
||||||
installed from your preferred package management system (e.g.
|
(e.g. MacPorts, Fink, or Homebrew). The name of the package that you need
|
||||||
MacPorts, Fink, or Homebrew), they should be automatically detected
|
may be libgeoip, geoip, or geoip-dev, depending on which package management
|
||||||
and Bro will compile against them.
|
system you are using.
|
||||||
|
|
||||||
|
|
||||||
GeoIPLite Database Installation
|
GeoIPLite Database Installation
|
||||||
------------------------------------
|
-------------------------------
|
||||||
|
|
||||||
A country database for GeoIPLite is included when you do the C API
|
A country database for GeoIPLite is included when you do the C API
|
||||||
install, but for Bro, we are using the city database which includes
|
install, but for Bro, we are using the city database which includes
|
||||||
cities and regions in addition to countries.
|
cities and regions in addition to countries.
|
||||||
|
|
||||||
`Download <http://www.maxmind.com/app/geolitecity>`__ the GeoLite city
|
`Download <http://www.maxmind.com/app/geolitecity>`__ the GeoLite city
|
||||||
binary database.
|
binary database:
|
||||||
|
|
||||||
.. console::
|
.. console::
|
||||||
|
|
||||||
wget http://geolite.maxmind.com/download/geoip/database/GeoLiteCity.dat.gz
|
wget http://geolite.maxmind.com/download/geoip/database/GeoLiteCity.dat.gz
|
||||||
gunzip GeoLiteCity.dat.gz
|
gunzip GeoLiteCity.dat.gz
|
||||||
|
|
||||||
Next, the file needs to be put in the database directory. This directory
|
Next, the file needs to be renamed and put in the GeoIP database directory.
|
||||||
should already exist and will vary depending on which platform and package
|
This directory should already exist and will vary depending on which platform
|
||||||
you are using. For FreeBSD, use ``/usr/local/share/GeoIP``. For Linux,
|
and package you are using. For FreeBSD, use ``/usr/local/share/GeoIP``. For
|
||||||
use ``/usr/share/GeoIP`` or ``/var/lib/GeoIP`` (choose whichever one
|
Linux, use ``/usr/share/GeoIP`` or ``/var/lib/GeoIP`` (choose whichever one
|
||||||
already exists).
|
already exists).
|
||||||
|
|
||||||
.. console::
|
.. console::
|
||||||
|
|
||||||
mv GeoLiteCity.dat <path_to_database_dir>/GeoIPCity.dat
|
mv GeoLiteCity.dat <path_to_database_dir>/GeoIPCity.dat
|
||||||
|
|
||||||
|
Note that there is a separate database for IPv6 addresses, which can also
|
||||||
|
be installed if you want GeoIP functionality for IPv6.
|
||||||
|
|
||||||
|
Testing
|
||||||
|
-------
|
||||||
|
|
||||||
|
Before using the GeoIP functionality, it is a good idea to verify that
|
||||||
|
everything is setup correctly. After installing libGeoIP and the GeoIP city
|
||||||
|
database, and building Bro, you can quickly check if the GeoIP functionality
|
||||||
|
works by running a command like this:
|
||||||
|
|
||||||
|
.. console::
|
||||||
|
|
||||||
|
bro -e "print lookup_location(8.8.8.8);"
|
||||||
|
|
||||||
|
If you see an error message similar to "Failed to open GeoIP City database",
|
||||||
|
then you may need to either rename or move your GeoIP city database file (the
|
||||||
|
error message should give you the full pathname of the database file that
|
||||||
|
Bro is looking for).
|
||||||
|
|
||||||
|
If you see an error message similar to "Bro was not configured for GeoIP
|
||||||
|
support", then you need to rebuild Bro and make sure it is linked against
|
||||||
|
libGeoIP. Normally, if libGeoIP is installed correctly then it should
|
||||||
|
automatically be found when building Bro. If this doesn't happen, then
|
||||||
|
you may need to specify the path to the libGeoIP installation
|
||||||
|
(e.g. ``./configure --with-geoip=<path>``).
|
||||||
|
|
||||||
Usage
|
Usage
|
||||||
-----
|
-----
|
||||||
|
|
||||||
There is a single built in function that provides the GeoIP
|
There is a built-in function that provides the GeoIP functionality:
|
||||||
functionality:
|
|
||||||
|
|
||||||
.. code:: bro
|
.. code:: bro
|
||||||
|
|
||||||
function lookup_location(a:addr): geo_location
|
function lookup_location(a:addr): geo_location
|
||||||
|
|
||||||
There is also the :bro:see:`geo_location` data structure that is returned
|
The return value of the :bro:see:`lookup_location` function is a record
|
||||||
from the :bro:see:`lookup_location` function:
|
type called :bro:see:`geo_location`, and it consists of several fields
|
||||||
|
containing the country, region, city, latitude, and longitude of the specified
|
||||||
.. code:: bro
|
IP address. Since one or more fields in this record will be uninitialized
|
||||||
|
for some IP addresses (for example, the country and region of an IP address
|
||||||
type geo_location: record {
|
might be known, but the city could be unknown), a field should be checked
|
||||||
country_code: string;
|
if it has a value before trying to access the value.
|
||||||
region: string;
|
|
||||||
city: string;
|
|
||||||
latitude: double;
|
|
||||||
longitude: double;
|
|
||||||
};
|
|
||||||
|
|
||||||
|
|
||||||
Example
|
Example
|
||||||
-------
|
-------
|
||||||
|
|
||||||
To write a line in a log file for every ftp connection from hosts in
|
To show every ftp connection from hosts in Ohio, this is now very easy:
|
||||||
Ohio, this is now very easy:
|
|
||||||
|
|
||||||
.. code:: bro
|
.. code:: bro
|
||||||
|
|
||||||
global ftp_location_log: file = open_log_file("ftp-location");
|
|
||||||
|
|
||||||
event ftp_reply(c: connection, code: count, msg: string, cont_resp: bool)
|
event ftp_reply(c: connection, code: count, msg: string, cont_resp: bool)
|
||||||
{
|
{
|
||||||
local client = c$id$orig_h;
|
local client = c$id$orig_h;
|
||||||
local loc = lookup_location(client);
|
local loc = lookup_location(client);
|
||||||
if (loc$region == "OH" && loc$country_code == "US")
|
|
||||||
|
if (loc?$region && loc$region == "OH" && loc$country_code == "US")
|
||||||
{
|
{
|
||||||
print ftp_location_log, fmt("FTP Connection from:%s (%s,%s,%s)", client, loc$city, loc$region, loc$country_code);
|
local city = loc?$city ? loc$city : "<unknown>";
|
||||||
|
|
||||||
|
print fmt("FTP Connection from:%s (%s,%s,%s)", client, city,
|
||||||
|
loc$region, loc$country_code);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@ -32,7 +32,8 @@ For this example we assume that we want to import data from a blacklist
|
||||||
that contains server IP addresses as well as the timestamp and the reason
|
that contains server IP addresses as well as the timestamp and the reason
|
||||||
for the block.
|
for the block.
|
||||||
|
|
||||||
An example input file could look like this:
|
An example input file could look like this (note that all fields must be
|
||||||
|
tab-separated):
|
||||||
|
|
||||||
::
|
::
|
||||||
|
|
||||||
|
@ -63,19 +64,23 @@ The two records are defined as:
|
||||||
reason: string;
|
reason: string;
|
||||||
};
|
};
|
||||||
|
|
||||||
Note that the names of the fields in the record definitions have to correspond
|
Note that the names of the fields in the record definitions must correspond
|
||||||
to the column names listed in the '#fields' line of the log file, in this
|
to the column names listed in the '#fields' line of the log file, in this
|
||||||
case 'ip', 'timestamp', and 'reason'.
|
case 'ip', 'timestamp', and 'reason'. Also note that the ordering of the
|
||||||
|
columns does not matter, because each column is identified by name.
|
||||||
|
|
||||||
The log file is read into the table with a simple call of the ``add_table``
|
The log file is read into the table with a simple call of the
|
||||||
function:
|
:bro:id:`Input::add_table` function:
|
||||||
|
|
||||||
.. code:: bro
|
.. code:: bro
|
||||||
|
|
||||||
global blacklist: table[addr] of Val = table();
|
global blacklist: table[addr] of Val = table();
|
||||||
|
|
||||||
Input::add_table([$source="blacklist.file", $name="blacklist", $idx=Idx, $val=Val, $destination=blacklist]);
|
event bro_init() {
|
||||||
|
Input::add_table([$source="blacklist.file", $name="blacklist",
|
||||||
|
$idx=Idx, $val=Val, $destination=blacklist]);
|
||||||
Input::remove("blacklist");
|
Input::remove("blacklist");
|
||||||
|
}
|
||||||
|
|
||||||
With these three lines we first create an empty table that should contain the
|
With these three lines we first create an empty table that should contain the
|
||||||
blacklist data and then instruct the input framework to open an input stream
|
blacklist data and then instruct the input framework to open an input stream
|
||||||
|
@ -92,7 +97,7 @@ Because of this, the data is not immediately accessible. Depending on the
|
||||||
size of the data source it might take from a few milliseconds up to a few
|
size of the data source it might take from a few milliseconds up to a few
|
||||||
seconds until all data is present in the table. Please note that this means
|
seconds until all data is present in the table. Please note that this means
|
||||||
that when Bro is running without an input source or on very short captured
|
that when Bro is running without an input source or on very short captured
|
||||||
files, it might terminate before the data is present in the system (because
|
files, it might terminate before the data is present in the table (because
|
||||||
Bro already handled all packets before the import thread finished).
|
Bro already handled all packets before the import thread finished).
|
||||||
|
|
||||||
Subsequent calls to an input source are queued until the previous action has
|
Subsequent calls to an input source are queued until the previous action has
|
||||||
|
@ -101,8 +106,8 @@ been completed. Because of this, it is, for example, possible to call
|
||||||
will remain queued until the first read has been completed.
|
will remain queued until the first read has been completed.
|
||||||
|
|
||||||
Once the input framework finishes reading from a data source, it fires
|
Once the input framework finishes reading from a data source, it fires
|
||||||
the ``end_of_data`` event. Once this event has been received all data
|
the :bro:id:`Input::end_of_data` event. Once this event has been received all
|
||||||
from the input file is available in the table.
|
data from the input file is available in the table.
|
||||||
|
|
||||||
.. code:: bro
|
.. code:: bro
|
||||||
|
|
||||||
|
@ -111,9 +116,9 @@ from the input file is available in the table.
|
||||||
print blacklist;
|
print blacklist;
|
||||||
}
|
}
|
||||||
|
|
||||||
The table can also already be used while the data is still being read - it
|
The table can be used while the data is still being read - it
|
||||||
just might not contain all lines in the input file when the event has not
|
just might not contain all lines from the input file before the event has
|
||||||
yet fired. After it has been populated it can be used like any other Bro
|
fired. After the table has been populated it can be used like any other Bro
|
||||||
table and blacklist entries can easily be tested:
|
table and blacklist entries can easily be tested:
|
||||||
|
|
||||||
.. code:: bro
|
.. code:: bro
|
||||||
|
@ -130,10 +135,11 @@ changing. For these cases, the Bro input framework supports several ways to
|
||||||
deal with changing data files.
|
deal with changing data files.
|
||||||
|
|
||||||
The first, very basic method is an explicit refresh of an input stream. When
|
The first, very basic method is an explicit refresh of an input stream. When
|
||||||
an input stream is open, the function ``force_update`` can be called. This
|
an input stream is open (this means it has not yet been removed by a call to
|
||||||
will trigger a complete refresh of the table; any changed elements from the
|
:bro:id:`Input::remove`), the function :bro:id:`Input::force_update` can be
|
||||||
file will be updated. After the update is finished the ``end_of_data``
|
called. This will trigger a complete refresh of the table; any changed
|
||||||
event will be raised.
|
elements from the file will be updated. After the update is finished the
|
||||||
|
:bro:id:`Input::end_of_data` event will be raised.
|
||||||
|
|
||||||
In our example the call would look like:
|
In our example the call would look like:
|
||||||
|
|
||||||
|
@ -141,30 +147,35 @@ In our example the call would look like:
|
||||||
|
|
||||||
Input::force_update("blacklist");
|
Input::force_update("blacklist");
|
||||||
|
|
||||||
The input framework also supports two automatic refresh modes. The first mode
|
Alternatively, the input framework can automatically refresh the table
|
||||||
continually checks if a file has been changed. If the file has been changed, it
|
contents when it detects a change to the input file. To use this feature,
|
||||||
|
you need to specify a non-default read mode by setting the ``mode`` option
|
||||||
|
of the :bro:id:`Input::add_table` call. Valid values are ``Input::MANUAL``
|
||||||
|
(the default), ``Input::REREAD`` and ``Input::STREAM``. For example,
|
||||||
|
setting the value of the ``mode`` option in the previous example
|
||||||
|
would look like this:
|
||||||
|
|
||||||
|
.. code:: bro
|
||||||
|
|
||||||
|
Input::add_table([$source="blacklist.file", $name="blacklist",
|
||||||
|
$idx=Idx, $val=Val, $destination=blacklist,
|
||||||
|
$mode=Input::REREAD]);
|
||||||
|
|
||||||
|
When using the reread mode (i.e., ``$mode=Input::REREAD``), Bro continually
|
||||||
|
checks if the input file has been changed. If the file has been changed, it
|
||||||
is re-read and the data in the Bro table is updated to reflect the current
|
is re-read and the data in the Bro table is updated to reflect the current
|
||||||
state. Each time a change has been detected and all the new data has been
|
state. Each time a change has been detected and all the new data has been
|
||||||
read into the table, the ``end_of_data`` event is raised.
|
read into the table, the ``end_of_data`` event is raised.
|
||||||
|
|
||||||
The second mode is a streaming mode. This mode assumes that the source data
|
When using the streaming mode (i.e., ``$mode=Input::STREAM``), Bro assumes
|
||||||
file is an append-only file to which new data is continually appended. Bro
|
that the source data file is an append-only file to which new data is
|
||||||
continually checks for new data at the end of the file and will add the new
|
continually appended. Bro continually checks for new data at the end of
|
||||||
data to the table. If newer lines in the file have the same index as previous
|
the file and will add the new data to the table. If newer lines in the
|
||||||
lines, they will overwrite the values in the output table. Because of the
|
file have the same index as previous lines, they will overwrite the
|
||||||
nature of streaming reads (data is continually added to the table),
|
values in the output table. Because of the nature of streaming reads
|
||||||
the ``end_of_data`` event is never raised when using streaming reads.
|
(data is continually added to the table), the ``end_of_data`` event
|
||||||
|
is never raised when using streaming reads.
|
||||||
|
|
||||||
The reading mode can be selected by setting the ``mode`` option of the
|
|
||||||
add_table call. Valid values are ``MANUAL`` (the default), ``REREAD``
|
|
||||||
and ``STREAM``.
|
|
||||||
|
|
||||||
Hence, when adding ``$mode=Input::REREAD`` to the previous example, the
|
|
||||||
blacklist table will always reflect the state of the blacklist input file.
|
|
||||||
|
|
||||||
.. code:: bro
|
|
||||||
|
|
||||||
Input::add_table([$source="blacklist.file", $name="blacklist", $idx=Idx, $val=Val, $destination=blacklist, $mode=Input::REREAD]);
|
|
||||||
|
|
||||||
Receiving change events
|
Receiving change events
|
||||||
-----------------------
|
-----------------------
|
||||||
|
@ -173,34 +184,40 @@ When re-reading files, it might be interesting to know exactly which lines in
|
||||||
the source files have changed.
|
the source files have changed.
|
||||||
|
|
||||||
For this reason, the input framework can raise an event each time when a data
|
For this reason, the input framework can raise an event each time when a data
|
||||||
item is added to, removed from or changed in a table.
|
item is added to, removed from, or changed in a table.
|
||||||
|
|
||||||
The event definition looks like this:
|
The event definition looks like this (note that you can change the name of
|
||||||
|
this event in your own Bro script):
|
||||||
|
|
||||||
.. code:: bro
|
.. code:: bro
|
||||||
|
|
||||||
event entry(description: Input::TableDescription, tpe: Input::Event, left: Idx, right: Val) {
|
event entry(description: Input::TableDescription, tpe: Input::Event,
|
||||||
# act on values
|
left: Idx, right: Val) {
|
||||||
|
# do something here...
|
||||||
|
print fmt("%s = %s", left, right);
|
||||||
}
|
}
|
||||||
|
|
||||||
The event has to be specified in ``$ev`` in the ``add_table`` call:
|
The event must be specified in ``$ev`` in the ``add_table`` call:
|
||||||
|
|
||||||
.. code:: bro
|
.. code:: bro
|
||||||
|
|
||||||
Input::add_table([$source="blacklist.file", $name="blacklist", $idx=Idx, $val=Val, $destination=blacklist, $mode=Input::REREAD, $ev=entry]);
|
Input::add_table([$source="blacklist.file", $name="blacklist",
|
||||||
|
$idx=Idx, $val=Val, $destination=blacklist,
|
||||||
|
$mode=Input::REREAD, $ev=entry]);
|
||||||
|
|
||||||
The ``description`` field of the event contains the arguments that were
|
The ``description`` argument of the event contains the arguments that were
|
||||||
originally supplied to the add_table call. Hence, the name of the stream can,
|
originally supplied to the add_table call. Hence, the name of the stream can,
|
||||||
for example, be accessed with ``description$name``. ``tpe`` is an enum
|
for example, be accessed with ``description$name``. The ``tpe`` argument of the
|
||||||
containing the type of the change that occurred.
|
event is an enum containing the type of the change that occurred.
|
||||||
|
|
||||||
If a line that was not previously present in the table has been added,
|
If a line that was not previously present in the table has been added,
|
||||||
then ``tpe`` will contain ``Input::EVENT_NEW``. In this case ``left`` contains
|
then the value of ``tpe`` will be ``Input::EVENT_NEW``. In this case ``left``
|
||||||
the index of the added table entry and ``right`` contains the values of the
|
contains the index of the added table entry and ``right`` contains the
|
||||||
added entry.
|
values of the added entry.
|
||||||
|
|
||||||
If a table entry that already was present is altered during the re-reading or
|
If a table entry that already was present is altered during the re-reading or
|
||||||
streaming read of a file, ``tpe`` will contain ``Input::EVENT_CHANGED``. In
|
streaming read of a file, then the value of ``tpe`` will be
|
||||||
|
``Input::EVENT_CHANGED``. In
|
||||||
this case ``left`` contains the index of the changed table entry and ``right``
|
this case ``left`` contains the index of the changed table entry and ``right``
|
||||||
contains the values of the entry before the change. The reason for this is
|
contains the values of the entry before the change. The reason for this is
|
||||||
that the table already has been updated when the event is raised. The current
|
that the table already has been updated when the event is raised. The current
|
||||||
|
@ -208,8 +225,9 @@ value in the table can be ascertained by looking up the current table value.
|
||||||
Hence it is possible to compare the new and the old values of the table.
|
Hence it is possible to compare the new and the old values of the table.
|
||||||
|
|
||||||
If a table element is removed because it was no longer present during a
|
If a table element is removed because it was no longer present during a
|
||||||
re-read, then ``tpe`` will contain ``Input::REMOVED``. In this case ``left``
|
re-read, then the value of ``tpe`` will be ``Input::EVENT_REMOVED``. In this
|
||||||
contains the index and ``right`` the values of the removed element.
|
case ``left`` contains the index and ``right`` the values of the removed
|
||||||
|
element.
|
||||||
|
|
||||||
|
|
||||||
Filtering data during import
|
Filtering data during import
|
||||||
|
@ -222,24 +240,26 @@ can either accept or veto the change by returning true for an accepted
|
||||||
change and false for a rejected change. Furthermore, it can alter the data
|
change and false for a rejected change. Furthermore, it can alter the data
|
||||||
before it is written to the table.
|
before it is written to the table.
|
||||||
|
|
||||||
The following example filter will reject to add entries to the table when
|
The following example filter will reject adding entries to the table when
|
||||||
they were generated over a month ago. It will accept all changes and all
|
they were generated over a month ago. It will accept all changes and all
|
||||||
removals of values that are already present in the table.
|
removals of values that are already present in the table.
|
||||||
|
|
||||||
.. code:: bro
|
.. code:: bro
|
||||||
|
|
||||||
Input::add_table([$source="blacklist.file", $name="blacklist", $idx=Idx, $val=Val, $destination=blacklist, $mode=Input::REREAD,
|
Input::add_table([$source="blacklist.file", $name="blacklist",
|
||||||
|
$idx=Idx, $val=Val, $destination=blacklist,
|
||||||
|
$mode=Input::REREAD,
|
||||||
$pred(typ: Input::Event, left: Idx, right: Val) = {
|
$pred(typ: Input::Event, left: Idx, right: Val) = {
|
||||||
if ( typ != Input::EVENT_NEW ) {
|
if ( typ != Input::EVENT_NEW ) {
|
||||||
return T;
|
return T;
|
||||||
}
|
}
|
||||||
return ( ( current_time() - right$timestamp ) < (30 day) );
|
return (current_time() - right$timestamp) < 30day;
|
||||||
}]);
|
}]);
|
||||||
|
|
||||||
To change elements while they are being imported, the predicate function can
|
To change elements while they are being imported, the predicate function can
|
||||||
manipulate ``left`` and ``right``. Note that predicate functions are called
|
manipulate ``left`` and ``right``. Note that predicate functions are called
|
||||||
before the change is committed to the table. Hence, when a table element is
|
before the change is committed to the table. Hence, when a table element is
|
||||||
changed (``tpe`` is ``INPUT::EVENT_CHANGED``), ``left`` and ``right``
|
changed (``typ`` is ``Input::EVENT_CHANGED``), ``left`` and ``right``
|
||||||
contain the new values, but the destination (``blacklist`` in our example)
|
contain the new values, but the destination (``blacklist`` in our example)
|
||||||
still contains the old values. This allows predicate functions to examine
|
still contains the old values. This allows predicate functions to examine
|
||||||
the changes between the old and the new version before deciding if they
|
the changes between the old and the new version before deciding if they
|
||||||
|
@ -250,14 +270,19 @@ Different readers
|
||||||
|
|
||||||
The input framework supports different kinds of readers for different kinds
|
The input framework supports different kinds of readers for different kinds
|
||||||
of source data files. At the moment, the default reader reads ASCII files
|
of source data files. At the moment, the default reader reads ASCII files
|
||||||
formatted in the Bro log file format (tab-separated values). At the moment,
|
formatted in the Bro log file format (tab-separated values with a "#fields"
|
||||||
Bro comes with two other readers. The ``RAW`` reader reads a file that is
|
header line). Several other readers are included in Bro.
|
||||||
split by a specified record separator (usually newline). The contents are
|
|
||||||
|
The raw reader reads a file that is
|
||||||
|
split by a specified record separator (newline by default). The contents are
|
||||||
returned line-by-line as strings; it can, for example, be used to read
|
returned line-by-line as strings; it can, for example, be used to read
|
||||||
configuration files and the like and is probably
|
configuration files and the like and is probably
|
||||||
only useful in the event mode and not for reading data to tables.
|
only useful in the event mode and not for reading data to tables.
|
||||||
|
|
||||||
Another included reader is the ``BENCHMARK`` reader, which is being used
|
The binary reader is intended to be used with file analysis input streams (and
|
||||||
|
is the default type of reader for those streams).
|
||||||
|
|
||||||
|
The benchmark reader is being used
|
||||||
to optimize the speed of the input framework. It can generate arbitrary
|
to optimize the speed of the input framework. It can generate arbitrary
|
||||||
amounts of semi-random data in all Bro data types supported by the input
|
amounts of semi-random data in all Bro data types supported by the input
|
||||||
framework.
|
framework.
|
||||||
|
@ -270,75 +295,17 @@ aforementioned ones:
|
||||||
|
|
||||||
logging-input-sqlite
|
logging-input-sqlite
|
||||||
|
|
||||||
Add_table options
|
|
||||||
-----------------
|
|
||||||
|
|
||||||
This section lists all possible options that can be used for the add_table
|
|
||||||
function and gives a short explanation of their use. Most of the options
|
|
||||||
already have been discussed in the previous sections.
|
|
||||||
|
|
||||||
The possible fields that can be set for a table stream are:
|
|
||||||
|
|
||||||
``source``
|
|
||||||
A mandatory string identifying the source of the data.
|
|
||||||
For the ASCII reader this is the filename.
|
|
||||||
|
|
||||||
``name``
|
|
||||||
A mandatory name for the filter that can later be used
|
|
||||||
to manipulate it further.
|
|
||||||
|
|
||||||
``idx``
|
|
||||||
Record type that defines the index of the table.
|
|
||||||
|
|
||||||
``val``
|
|
||||||
Record type that defines the values of the table.
|
|
||||||
|
|
||||||
``reader``
|
|
||||||
The reader used for this stream. Default is ``READER_ASCII``.
|
|
||||||
|
|
||||||
``mode``
|
|
||||||
The mode in which the stream is opened. Possible values are
|
|
||||||
``MANUAL``, ``REREAD`` and ``STREAM``. Default is ``MANUAL``.
|
|
||||||
``MANUAL`` means that the file is not updated after it has
|
|
||||||
been read. Changes to the file will not be reflected in the
|
|
||||||
data Bro knows. ``REREAD`` means that the whole file is read
|
|
||||||
again each time a change is found. This should be used for
|
|
||||||
files that are mapped to a table where individual lines can
|
|
||||||
change. ``STREAM`` means that the data from the file is
|
|
||||||
streamed. Events / table entries will be generated as new
|
|
||||||
data is appended to the file.
|
|
||||||
|
|
||||||
``destination``
|
|
||||||
The destination table.
|
|
||||||
|
|
||||||
``ev``
|
|
||||||
Optional event that is raised, when values are added to,
|
|
||||||
changed in, or deleted from the table. Events are passed an
|
|
||||||
Input::Event description as the first argument, the index
|
|
||||||
record as the second argument and the values as the third
|
|
||||||
argument.
|
|
||||||
|
|
||||||
``pred``
|
|
||||||
Optional predicate, that can prevent entries from being added
|
|
||||||
to the table and events from being sent.
|
|
||||||
|
|
||||||
``want_record``
|
|
||||||
Boolean value, that defines if the event wants to receive the
|
|
||||||
fields inside of a single record value, or individually
|
|
||||||
(default). This can be used if ``val`` is a record
|
|
||||||
containing only one type. In this case, if ``want_record`` is
|
|
||||||
set to false, the table will contain elements of the type
|
|
||||||
contained in ``val``.
|
|
||||||
|
|
||||||
Reading Data to Events
|
Reading Data to Events
|
||||||
======================
|
======================
|
||||||
|
|
||||||
The second supported mode of the input framework is reading data to Bro
|
The second supported mode of the input framework is reading data to Bro
|
||||||
events instead of reading them to a table using event streams.
|
events instead of reading them to a table.
|
||||||
|
|
||||||
Event streams work very similarly to table streams that were already
|
Event streams work very similarly to table streams that were already
|
||||||
discussed in much detail. To read the blacklist of the previous example
|
discussed in much detail. To read the blacklist of the previous example
|
||||||
into an event stream, the following Bro code could be used:
|
into an event stream, the :bro:id:`Input::add_event` function is used.
|
||||||
|
For example:
|
||||||
|
|
||||||
.. code:: bro
|
.. code:: bro
|
||||||
|
|
||||||
|
@ -348,12 +315,15 @@ into an event stream, the following Bro code could be used:
|
||||||
reason: string;
|
reason: string;
|
||||||
};
|
};
|
||||||
|
|
||||||
event blacklistentry(description: Input::EventDescription, tpe: Input::Event, ip: addr, timestamp: time, reason: string) {
|
event blacklistentry(description: Input::EventDescription,
|
||||||
# work with event data
|
t: Input::Event, data: Val) {
|
||||||
|
# do something here...
|
||||||
|
print "data:", data;
|
||||||
}
|
}
|
||||||
|
|
||||||
event bro_init() {
|
event bro_init() {
|
||||||
Input::add_event([$source="blacklist.file", $name="blacklist", $fields=Val, $ev=blacklistentry]);
|
Input::add_event([$source="blacklist.file", $name="blacklist",
|
||||||
|
$fields=Val, $ev=blacklistentry]);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@ -364,52 +334,3 @@ data types are provided in a single record definition.
|
||||||
Apart from this, event streams work exactly the same as table streams and
|
Apart from this, event streams work exactly the same as table streams and
|
||||||
support most of the options that are also supported for table streams.
|
support most of the options that are also supported for table streams.
|
||||||
|
|
||||||
The options that can be set when creating an event stream with
|
|
||||||
``add_event`` are:
|
|
||||||
|
|
||||||
``source``
|
|
||||||
A mandatory string identifying the source of the data.
|
|
||||||
For the ASCII reader this is the filename.
|
|
||||||
|
|
||||||
``name``
|
|
||||||
A mandatory name for the stream that can later be used
|
|
||||||
to remove it.
|
|
||||||
|
|
||||||
``fields``
|
|
||||||
Name of a record type containing the fields, which should be
|
|
||||||
retrieved from the input stream.
|
|
||||||
|
|
||||||
``ev``
|
|
||||||
The event which is fired, after a line has been read from the
|
|
||||||
input source. The first argument that is passed to the event
|
|
||||||
is an Input::Event structure, followed by the data, either
|
|
||||||
inside of a record (if ``want_record is set``) or as
|
|
||||||
individual fields. The Input::Event structure can contain
|
|
||||||
information, if the received line is ``NEW``, has been
|
|
||||||
``CHANGED`` or ``DELETED``. Since the ASCII reader cannot
|
|
||||||
track this information for event filters, the value is
|
|
||||||
always ``NEW`` at the moment.
|
|
||||||
|
|
||||||
``mode``
|
|
||||||
The mode in which the stream is opened. Possible values are
|
|
||||||
``MANUAL``, ``REREAD`` and ``STREAM``. Default is ``MANUAL``.
|
|
||||||
``MANUAL`` means that the file is not updated after it has
|
|
||||||
been read. Changes to the file will not be reflected in the
|
|
||||||
data Bro knows. ``REREAD`` means that the whole file is read
|
|
||||||
again each time a change is found. This should be used for
|
|
||||||
files that are mapped to a table where individual lines can
|
|
||||||
change. ``STREAM`` means that the data from the file is
|
|
||||||
streamed. Events / table entries will be generated as new
|
|
||||||
data is appended to the file.
|
|
||||||
|
|
||||||
``reader``
|
|
||||||
The reader used for this stream. Default is ``READER_ASCII``.
|
|
||||||
|
|
||||||
``want_record``
|
|
||||||
Boolean value, that defines if the event wants to receive the
|
|
||||||
fields inside of a single record value, or individually
|
|
||||||
(default). If this is set to true, the event will receive a
|
|
||||||
single record of the type provided in ``fields``.
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@ -23,17 +23,18 @@ In contrast to the ASCII reader and writer, the SQLite plugins have not yet
|
||||||
seen extensive use in production environments. While we are not aware
|
seen extensive use in production environments. While we are not aware
|
||||||
of any issues with them, we urge caution when using them
|
of any issues with them, we urge caution when using them
|
||||||
in production environments. There could be lingering issues which only occur
|
in production environments. There could be lingering issues which only occur
|
||||||
when the plugins are used with high amounts of data or in high-load environments.
|
when the plugins are used with high amounts of data or in high-load
|
||||||
|
environments.
|
||||||
|
|
||||||
Logging Data into SQLite Databases
|
Logging Data into SQLite Databases
|
||||||
==================================
|
==================================
|
||||||
|
|
||||||
Logging support for SQLite is available in all Bro installations starting with
|
Logging support for SQLite is available in all Bro installations starting with
|
||||||
version 2.2. There is no need to load any additional scripts or for any compile-time
|
version 2.2. There is no need to load any additional scripts or for any
|
||||||
configurations.
|
compile-time configurations.
|
||||||
|
|
||||||
Sending data from existing logging streams to SQLite is rather straightforward. You
|
Sending data from existing logging streams to SQLite is rather straightforward.
|
||||||
have to define a filter which specifies SQLite as the writer.
|
You have to define a filter which specifies SQLite as the writer.
|
||||||
|
|
||||||
The following example code adds SQLite as a filter for the connection log:
|
The following example code adds SQLite as a filter for the connection log:
|
||||||
|
|
||||||
|
@ -44,15 +45,15 @@ The following example code adds SQLite as a filter for the connection log:
|
||||||
# Make sure this parses correctly at least.
|
# Make sure this parses correctly at least.
|
||||||
@TEST-EXEC: bro ${DOC_ROOT}/frameworks/sqlite-conn-filter.bro
|
@TEST-EXEC: bro ${DOC_ROOT}/frameworks/sqlite-conn-filter.bro
|
||||||
|
|
||||||
Bro will create the database file ``/var/db/conn.sqlite``, if it does not already exist.
|
Bro will create the database file ``/var/db/conn.sqlite``, if it does not
|
||||||
It will also create a table with the name ``conn`` (if it does not exist) and start
|
already exist. It will also create a table with the name ``conn`` (if it
|
||||||
appending connection information to the table.
|
does not exist) and start appending connection information to the table.
|
||||||
|
|
||||||
At the moment, SQLite databases are not rotated the same way ASCII log-files are. You
|
At the moment, SQLite databases are not rotated the same way ASCII log-files
|
||||||
have to take care to create them in an adequate location.
|
are. You have to take care to create them in an adequate location.
|
||||||
|
|
||||||
If you examine the resulting SQLite database, the schema will contain the same fields
|
If you examine the resulting SQLite database, the schema will contain the
|
||||||
that are present in the ASCII log files::
|
same fields that are present in the ASCII log files::
|
||||||
|
|
||||||
# sqlite3 /var/db/conn.sqlite
|
# sqlite3 /var/db/conn.sqlite
|
||||||
|
|
||||||
|
@ -75,27 +76,31 @@ from being created, you can remove the default filter:
|
||||||
Log::remove_filter(Conn::LOG, "default");
|
Log::remove_filter(Conn::LOG, "default");
|
||||||
|
|
||||||
|
|
||||||
To create a custom SQLite log file, you have to create a new log stream that contains
|
To create a custom SQLite log file, you have to create a new log stream
|
||||||
just the information you want to commit to the database. Please refer to the
|
that contains just the information you want to commit to the database.
|
||||||
:ref:`framework-logging` documentation on how to create custom log streams.
|
Please refer to the :ref:`framework-logging` documentation on how to
|
||||||
|
create custom log streams.
|
||||||
|
|
||||||
Reading Data from SQLite Databases
|
Reading Data from SQLite Databases
|
||||||
==================================
|
==================================
|
||||||
|
|
||||||
Like logging support, support for reading data from SQLite databases is built into Bro starting
|
Like logging support, support for reading data from SQLite databases is
|
||||||
with version 2.2.
|
built into Bro starting with version 2.2.
|
||||||
|
|
||||||
Just as with the text-based input readers (please refer to the :ref:`framework-input`
|
Just as with the text-based input readers (please refer to the
|
||||||
documentation for them and for basic information on how to use the input-framework), the SQLite reader
|
:ref:`framework-input` documentation for them and for basic information
|
||||||
can be used to read data - in this case the result of SQL queries - into tables or into events.
|
on how to use the input framework), the SQLite reader can be used to
|
||||||
|
read data - in this case the result of SQL queries - into tables or into
|
||||||
|
events.
|
||||||
|
|
||||||
Reading Data into Tables
|
Reading Data into Tables
|
||||||
------------------------
|
------------------------
|
||||||
|
|
||||||
To read data from a SQLite database, we first have to provide Bro with the information, how
|
To read data from a SQLite database, we first have to provide Bro with
|
||||||
the resulting data will be structured. For this example, we expect that we have a SQLite database,
|
the information about how the resulting data will be structured. For this
|
||||||
which contains host IP addresses and the user accounts that are allowed to log into a specific
|
example, we expect that we have a SQLite database, which contains
|
||||||
machine.
|
host IP addresses and the user accounts that are allowed to log into
|
||||||
|
a specific machine.
|
||||||
|
|
||||||
The SQLite commands to create the schema are as follows::
|
The SQLite commands to create the schema are as follows::
|
||||||
|
|
||||||
|
@ -107,8 +112,8 @@ The SQLite commands to create the schema are as follows::
|
||||||
insert into machines_to_users values ('192.168.17.2', 'bernhard');
|
insert into machines_to_users values ('192.168.17.2', 'bernhard');
|
||||||
insert into machines_to_users values ('192.168.17.3', 'seth,matthias');
|
insert into machines_to_users values ('192.168.17.3', 'seth,matthias');
|
||||||
|
|
||||||
After creating a file called ``hosts.sqlite`` with this content, we can read the resulting table
|
After creating a file called ``hosts.sqlite`` with this content, we can
|
||||||
into Bro:
|
read the resulting table into Bro:
|
||||||
|
|
||||||
.. btest-include:: ${DOC_ROOT}/frameworks/sqlite-read-table.bro
|
.. btest-include:: ${DOC_ROOT}/frameworks/sqlite-read-table.bro
|
||||||
|
|
||||||
|
@ -117,22 +122,25 @@ into Bro:
|
||||||
# Make sure this parses correctly at least.
|
# Make sure this parses correctly at least.
|
||||||
@TEST-EXEC: bro ${DOC_ROOT}/frameworks/sqlite-read-table.bro
|
@TEST-EXEC: bro ${DOC_ROOT}/frameworks/sqlite-read-table.bro
|
||||||
|
|
||||||
Afterwards, that table can be used to check logins into hosts against the available
|
Afterwards, that table can be used to check logins into hosts against
|
||||||
userlist.
|
the available userlist.
|
||||||
|
|
||||||
Turning Data into Events
|
Turning Data into Events
|
||||||
------------------------
|
------------------------
|
||||||
|
|
||||||
The second mode is to use the SQLite reader to output the input data as events. Typically there
|
The second mode is to use the SQLite reader to output the input data as events.
|
||||||
are two reasons to do this. First, when the structure of the input data is too complicated
|
Typically there are two reasons to do this. First, when the structure of
|
||||||
for a direct table import. In this case, the data can be read into an event which can then
|
the input data is too complicated for a direct table import. In this case,
|
||||||
create the necessary data structures in Bro in scriptland.
|
the data can be read into an event which can then create the necessary
|
||||||
|
data structures in Bro in scriptland.
|
||||||
|
|
||||||
The second reason is, that the dataset is too big to hold it in memory. In this case, the checks
|
The second reason is that the dataset is too big to hold in memory. In
|
||||||
can be performed on-demand, when Bro encounters a situation where it needs additional information.
|
this case, the checks can be performed on-demand, when Bro encounters a
|
||||||
|
situation where it needs additional information.
|
||||||
|
|
||||||
An example for this would be an internal huge database with malware hashes. Live database queries
|
An example of this would be a huge internal database with malware
|
||||||
could be used to check the sporadically happening downloads against the database.
|
hashes. Live database queries could be used to check the sporadically
|
||||||
|
happening downloads against the database.
|
||||||
|
|
||||||
The SQLite commands to create the schema are as follows::
|
The SQLite commands to create the schema are as follows::
|
||||||
|
|
||||||
|
@ -151,9 +159,10 @@ The SQLite commands to create the schema are as follows::
|
||||||
insert into malware_hashes values ('73f45106968ff8dc51fba105fa91306af1ff6666', 'ftp-trace');
|
insert into malware_hashes values ('73f45106968ff8dc51fba105fa91306af1ff6666', 'ftp-trace');
|
||||||
|
|
||||||
|
|
||||||
The following code uses the file-analysis framework to get the sha1 hashes of files that are
|
The following code uses the file-analysis framework to get the sha1 hashes
|
||||||
transmitted over the network. For each hash, a SQL-query is run against SQLite. If the query
|
of files that are transmitted over the network. For each hash, a SQL-query
|
||||||
returns with a result, we had a hit against our malware-database and output the matching hash.
|
is run against SQLite. If the query returns with a result, we had a hit
|
||||||
|
against our malware-database and output the matching hash.
|
||||||
|
|
||||||
.. btest-include:: ${DOC_ROOT}/frameworks/sqlite-read-events.bro
|
.. btest-include:: ${DOC_ROOT}/frameworks/sqlite-read-events.bro
|
||||||
|
|
||||||
|
@ -162,5 +171,5 @@ returns with a result, we had a hit against our malware-database and output the
|
||||||
# Make sure this parses correctly at least.
|
# Make sure this parses correctly at least.
|
||||||
@TEST-EXEC: bro ${DOC_ROOT}/frameworks/sqlite-read-events.bro
|
@TEST-EXEC: bro ${DOC_ROOT}/frameworks/sqlite-read-events.bro
|
||||||
|
|
||||||
If you run this script against the trace in ``testing/btest/Traces/ftp/ipv4.trace``, you
|
If you run this script against the trace in
|
||||||
will get one hit.
|
``testing/btest/Traces/ftp/ipv4.trace``, you will get one hit.
|
||||||
|
|
|
@ -46,4 +46,4 @@ where Bro was originally installed). Review the files for differences
|
||||||
before copying and make adjustments as necessary (use the new version for
|
before copying and make adjustments as necessary (use the new version for
|
||||||
differences that aren't a result of a local change). Of particular note,
|
differences that aren't a result of a local change). Of particular note,
|
||||||
the copied version of ``$prefix/etc/broctl.cfg`` is likely to need changes
|
the copied version of ``$prefix/etc/broctl.cfg`` is likely to need changes
|
||||||
to the ``SpoolDir`` and ``LogDir`` settings.
|
to any settings that specify a pathname.
|
||||||
|
|
|
@ -4,7 +4,7 @@
|
||||||
.. _MacPorts: http://www.macports.org
|
.. _MacPorts: http://www.macports.org
|
||||||
.. _Fink: http://www.finkproject.org
|
.. _Fink: http://www.finkproject.org
|
||||||
.. _Homebrew: http://brew.sh
|
.. _Homebrew: http://brew.sh
|
||||||
.. _bro downloads page: http://bro.org/download/index.html
|
.. _bro downloads page: https://www.bro.org/download/index.html
|
||||||
|
|
||||||
.. _installing-bro:
|
.. _installing-bro:
|
||||||
|
|
||||||
|
@ -32,13 +32,13 @@ before you begin:
|
||||||
* Libz
|
* Libz
|
||||||
* Bash (for BroControl)
|
* Bash (for BroControl)
|
||||||
* Python (for BroControl)
|
* Python (for BroControl)
|
||||||
* C++ Actor Framework (CAF) (http://actor-framework.org)
|
* C++ Actor Framework (CAF) version 0.14 (http://actor-framework.org)
|
||||||
|
|
||||||
To build Bro from source, the following additional dependencies are required:
|
To build Bro from source, the following additional dependencies are required:
|
||||||
|
|
||||||
* CMake 2.8 or greater (http://www.cmake.org)
|
* CMake 2.8 or greater (http://www.cmake.org)
|
||||||
* Make
|
* Make
|
||||||
* C/C++ compiler with C++11 support
|
* C/C++ compiler with C++11 support (GCC 4.8+ or Clang 3.3+)
|
||||||
* SWIG (http://www.swig.org)
|
* SWIG (http://www.swig.org)
|
||||||
* Bison (GNU Parser Generator)
|
* Bison (GNU Parser Generator)
|
||||||
* Flex (Fast Lexical Analyzer)
|
* Flex (Fast Lexical Analyzer)
|
||||||
|
@ -47,9 +47,7 @@ To build Bro from source, the following additional dependencies are required:
|
||||||
* zlib headers
|
* zlib headers
|
||||||
* Python
|
* Python
|
||||||
|
|
||||||
.. todo::
|
To install CAF, first download the source code of the required version from: https://github.com/actor-framework/actor-framework/releases
|
||||||
|
|
||||||
Update with instructions for installing CAF.
|
|
||||||
|
|
||||||
To install the required dependencies, you can use:
|
To install the required dependencies, you can use:
|
||||||
|
|
||||||
|
@ -84,11 +82,11 @@ To install the required dependencies, you can use:
|
||||||
"Preferences..." -> "Downloads" menus to install the "Command Line Tools"
|
"Preferences..." -> "Downloads" menus to install the "Command Line Tools"
|
||||||
component).
|
component).
|
||||||
|
|
||||||
OS X comes with all required dependencies except for CMake_ and SWIG_.
|
OS X comes with all required dependencies except for CMake_, SWIG_, and CAF.
|
||||||
Distributions of these dependencies can likely be obtained from your
|
Distributions of these dependencies can likely be obtained from your
|
||||||
preferred Mac OS X package management system (e.g. MacPorts_, Fink_,
|
preferred Mac OS X package management system (e.g. Homebrew_, MacPorts_,
|
||||||
or Homebrew_). Specifically for MacPorts, the ``cmake``, ``swig``,
|
or Fink_). Specifically for Homebrew, the ``cmake``, ``swig``,
|
||||||
and ``swig-python`` packages provide the required dependencies.
|
and ``caf`` packages provide the required dependencies.
|
||||||
|
|
||||||
|
|
||||||
Optional Dependencies
|
Optional Dependencies
|
||||||
|
@ -101,6 +99,8 @@ build time:
|
||||||
* sendmail (enables Bro and BroControl to send mail)
|
* sendmail (enables Bro and BroControl to send mail)
|
||||||
* curl (used by a Bro script that implements active HTTP)
|
* curl (used by a Bro script that implements active HTTP)
|
||||||
* gperftools (tcmalloc is used to improve memory and CPU usage)
|
* gperftools (tcmalloc is used to improve memory and CPU usage)
|
||||||
|
* jemalloc (http://www.canonware.com/jemalloc/)
|
||||||
|
* PF_RING (Linux only, see :doc:`Cluster Configuration <../configuration/index>`)
|
||||||
* ipsumdump (for trace-summary; http://www.cs.ucla.edu/~kohler/ipsumdump)
|
* ipsumdump (for trace-summary; http://www.cs.ucla.edu/~kohler/ipsumdump)
|
||||||
|
|
||||||
LibGeoIP is probably the most interesting and can be installed
|
LibGeoIP is probably the most interesting and can be installed
|
||||||
|
@ -117,7 +117,7 @@ code forms.
|
||||||
|
|
||||||
|
|
||||||
Using Pre-Built Binary Release Packages
|
Using Pre-Built Binary Release Packages
|
||||||
=======================================
|
---------------------------------------
|
||||||
|
|
||||||
See the `bro downloads page`_ for currently supported/targeted
|
See the `bro downloads page`_ for currently supported/targeted
|
||||||
platforms for binary releases and for installation instructions.
|
platforms for binary releases and for installation instructions.
|
||||||
|
@ -138,13 +138,15 @@ platforms for binary releases and for installation instructions.
|
||||||
The primary install prefix for binary packages is ``/opt/bro``.
|
The primary install prefix for binary packages is ``/opt/bro``.
|
||||||
|
|
||||||
Installing from Source
|
Installing from Source
|
||||||
======================
|
----------------------
|
||||||
|
|
||||||
Bro releases are bundled into source packages for convenience and are
|
Bro releases are bundled into source packages for convenience and are
|
||||||
available on the `bro downloads page`_. Alternatively, the latest
|
available on the `bro downloads page`_.
|
||||||
Bro development version can be obtained through git repositories
|
|
||||||
|
Alternatively, the latest Bro development version
|
||||||
|
can be obtained through git repositories
|
||||||
hosted at ``git.bro.org``. See our `git development documentation
|
hosted at ``git.bro.org``. See our `git development documentation
|
||||||
<http://bro.org/development/howtos/process.html>`_ for comprehensive
|
<https://www.bro.org/development/howtos/process.html>`_ for comprehensive
|
||||||
information on Bro's use of git revision control, but the short story
|
information on Bro's use of git revision control, but the short story
|
||||||
for downloading the full source code experience for Bro via git is:
|
for downloading the full source code experience for Bro via git is:
|
||||||
|
|
||||||
|
@ -165,13 +167,23 @@ run ``./configure --help``):
|
||||||
make
|
make
|
||||||
make install
|
make install
|
||||||
|
|
||||||
|
If the ``configure`` script fails, then it is most likely because it either
|
||||||
|
couldn't find a required dependency or it couldn't find a sufficiently new
|
||||||
|
version of a dependency. Assuming that you already installed all required
|
||||||
|
dependencies, then you may need to use one of the ``--with-*`` options
|
||||||
|
that can be given to the ``configure`` script to help it locate a dependency.
|
||||||
|
|
||||||
The default installation path is ``/usr/local/bro``, which would typically
|
The default installation path is ``/usr/local/bro``, which would typically
|
||||||
require root privileges when doing the ``make install``. A different
|
require root privileges when doing the ``make install``. A different
|
||||||
installation path can be chosen by specifying the ``--prefix`` option.
|
installation path can be chosen by specifying the ``configure`` script
|
||||||
Note that ``/usr`` and ``/opt/bro`` are the
|
``--prefix`` option. Note that ``/usr`` and ``/opt/bro`` are the
|
||||||
standard prefixes for binary Bro packages to be installed, so those are
|
standard prefixes for binary Bro packages to be installed, so those are
|
||||||
typically not good choices unless you are creating such a package.
|
typically not good choices unless you are creating such a package.
|
||||||
|
|
||||||
|
OpenBSD users, please see our `FAQ
|
||||||
|
<https://www.bro.org/documentation/faq.html>`_ if you are having
|
||||||
|
problems installing Bro.
|
||||||
|
|
||||||
Depending on the Bro package you downloaded, there may be auxiliary
|
Depending on the Bro package you downloaded, there may be auxiliary
|
||||||
tools and libraries available in the ``aux/`` directory. Some of them
|
tools and libraries available in the ``aux/`` directory. Some of them
|
||||||
will be automatically built and installed along with Bro. There are
|
will be automatically built and installed along with Bro. There are
|
||||||
|
@ -180,10 +192,6 @@ turn off unwanted auxiliary projects that would otherwise be installed
|
||||||
automatically. Finally, use ``make install-aux`` to install some of
|
automatically. Finally, use ``make install-aux`` to install some of
|
||||||
the other programs that are in the ``aux/bro-aux`` directory.
|
the other programs that are in the ``aux/bro-aux`` directory.
|
||||||
|
|
||||||
OpenBSD users, please see our `FAQ
|
|
||||||
<//www.bro.org/documentation/faq.html>`_ if you are having
|
|
||||||
problems installing Bro.
|
|
||||||
|
|
||||||
Finally, if you want to build the Bro documentation (not required, because
|
Finally, if you want to build the Bro documentation (not required, because
|
||||||
all of the documentation for the latest Bro release is available on the
|
all of the documentation for the latest Bro release is available on the
|
||||||
Bro web site), there are instructions in ``doc/README`` in the source
|
Bro web site), there are instructions in ``doc/README`` in the source
|
||||||
|
@ -192,7 +200,7 @@ distribution.
|
||||||
Configure the Run-Time Environment
|
Configure the Run-Time Environment
|
||||||
==================================
|
==================================
|
||||||
|
|
||||||
Just remember that you may need to adjust your ``PATH`` environment variable
|
You may want to adjust your ``PATH`` environment variable
|
||||||
according to the platform/shell/package you're using. For example:
|
according to the platform/shell/package you're using. For example:
|
||||||
|
|
||||||
Bourne-Shell Syntax:
|
Bourne-Shell Syntax:
|
||||||
|
|
|
@ -54,13 +54,16 @@ Here is a more detailed explanation of each attribute:
|
||||||
|
|
||||||
.. bro:attr:: &redef
|
.. bro:attr:: &redef
|
||||||
|
|
||||||
Allows for redefinition of initial values of global objects declared as
|
Allows use of a :bro:keyword:`redef` to redefine initial values of
|
||||||
constant.
|
global variables (i.e., variables declared either :bro:keyword:`global`
|
||||||
|
or :bro:keyword:`const`). Example::
|
||||||
In this example, the constant (assuming it is global) can be redefined
|
|
||||||
with a :bro:keyword:`redef` at some later point::
|
|
||||||
|
|
||||||
const clever = T &redef;
|
const clever = T &redef;
|
||||||
|
global cache_size = 256 &redef;
|
||||||
|
|
||||||
|
Note that a variable declared "global" can also have its value changed
|
||||||
|
with assignment statements (doesn't matter if it has the "&redef"
|
||||||
|
attribute or not).
|
||||||
|
|
||||||
.. bro:attr:: &priority
|
.. bro:attr:: &priority
|
||||||
|
|
||||||
|
|
|
@ -71,9 +71,11 @@ Statements
|
||||||
Declarations
|
Declarations
|
||||||
------------
|
------------
|
||||||
|
|
||||||
The following global declarations cannot occur within a function, hook, or
|
Declarations cannot occur within a function, hook, or event handler.
|
||||||
event handler. Also, these declarations cannot appear after any statements
|
|
||||||
that are outside of a function, hook, or event handler.
|
Declarations must appear before any statements (except those statements
|
||||||
|
that are in a function, hook, or event handler) in the concatenation of
|
||||||
|
all loaded Bro scripts.
|
||||||
|
|
||||||
.. bro:keyword:: module
|
.. bro:keyword:: module
|
||||||
|
|
||||||
|
@ -126,9 +128,12 @@ that are outside of a function, hook, or event handler.
|
||||||
.. bro:keyword:: global
|
.. bro:keyword:: global
|
||||||
|
|
||||||
Variables declared with the "global" keyword will be global.
|
Variables declared with the "global" keyword will be global.
|
||||||
|
|
||||||
If a type is not specified, then an initializer is required so that
|
If a type is not specified, then an initializer is required so that
|
||||||
the type can be inferred. Likewise, if an initializer is not supplied,
|
the type can be inferred. Likewise, if an initializer is not supplied,
|
||||||
then the type must be specified. Example::
|
then the type must be specified. In some cases, when the type cannot
|
||||||
|
be correctly inferred, the type must be specified even when an
|
||||||
|
initializer is present. Example::
|
||||||
|
|
||||||
global pi = 3.14;
|
global pi = 3.14;
|
||||||
global hosts: set[addr];
|
global hosts: set[addr];
|
||||||
|
@ -136,10 +141,11 @@ that are outside of a function, hook, or event handler.
|
||||||
|
|
||||||
Variable declarations outside of any function, hook, or event handler are
|
Variable declarations outside of any function, hook, or event handler are
|
||||||
required to use this keyword (unless they are declared with the
|
required to use this keyword (unless they are declared with the
|
||||||
:bro:keyword:`const` keyword). Definitions of functions, hooks, and
|
:bro:keyword:`const` keyword instead).
|
||||||
event handlers are not allowed to use the "global"
|
|
||||||
keyword (they already have global scope), except function declarations
|
Definitions of functions, hooks, and event handlers are not allowed
|
||||||
where no function body is supplied use the "global" keyword.
|
to use the "global" keyword. However, function declarations (i.e., no
|
||||||
|
function body is provided) can use the "global" keyword.
|
||||||
|
|
||||||
The scope of a global variable begins where the declaration is located,
|
The scope of a global variable begins where the declaration is located,
|
||||||
and extends through all remaining Bro scripts that are loaded (however,
|
and extends through all remaining Bro scripts that are loaded (however,
|
||||||
|
@ -150,18 +156,22 @@ that are outside of a function, hook, or event handler.
|
||||||
.. bro:keyword:: const
|
.. bro:keyword:: const
|
||||||
|
|
||||||
A variable declared with the "const" keyword will be constant.
|
A variable declared with the "const" keyword will be constant.
|
||||||
|
|
||||||
Variables declared as constant are required to be initialized at the
|
Variables declared as constant are required to be initialized at the
|
||||||
time of declaration. Example::
|
time of declaration. Normally, the type is inferred from the initializer,
|
||||||
|
but the type can be explicitly specified. Example::
|
||||||
|
|
||||||
const pi = 3.14;
|
const pi = 3.14;
|
||||||
const ssh_port: port = 22/tcp;
|
const ssh_port: port = 22/tcp;
|
||||||
|
|
||||||
The value of a constant cannot be changed later (the only
|
The value of a constant cannot be changed. The only exception is if the
|
||||||
exception is if the variable is global and has the :bro:attr:`&redef`
|
variable is a global constant and has the :bro:attr:`&redef`
|
||||||
attribute, then its value can be changed only with a :bro:keyword:`redef`).
|
attribute, but even then its value can be changed only with a
|
||||||
|
:bro:keyword:`redef`.
|
||||||
|
|
||||||
The scope of a constant is local if the declaration is in a
|
The scope of a constant is local if the declaration is in a
|
||||||
function, hook, or event handler, and global otherwise.
|
function, hook, or event handler, and global otherwise.
|
||||||
|
|
||||||
Note that the "const" keyword cannot be used with either the "local"
|
Note that the "const" keyword cannot be used with either the "local"
|
||||||
or "global" keywords (i.e., "const" replaces "local" and "global").
|
or "global" keywords (i.e., "const" replaces "local" and "global").
|
||||||
|
|
||||||
|
@ -184,7 +194,8 @@ that are outside of a function, hook, or event handler.
|
||||||
.. bro:keyword:: redef
|
.. bro:keyword:: redef
|
||||||
|
|
||||||
There are three ways that "redef" can be used: to change the value of
|
There are three ways that "redef" can be used: to change the value of
|
||||||
a global variable, to extend a record type or enum type, or to specify
|
a global variable (but only if it has the :bro:attr:`&redef` attribute),
|
||||||
|
to extend a record type or enum type, or to specify
|
||||||
a new event handler body that replaces all those that were previously
|
a new event handler body that replaces all those that were previously
|
||||||
defined.
|
defined.
|
||||||
|
|
||||||
|
@ -237,13 +248,14 @@ that are outside of a function, hook, or event handler.
|
||||||
Statements
|
Statements
|
||||||
----------
|
----------
|
||||||
|
|
||||||
|
Statements (except those contained within a function, hook, or event
|
||||||
|
handler) can appear only after all global declarations in the concatenation
|
||||||
|
of all loaded Bro scripts.
|
||||||
|
|
||||||
Each statement in a Bro script must be terminated with a semicolon (with a
|
Each statement in a Bro script must be terminated with a semicolon (with a
|
||||||
few exceptions noted below). An individual statement can span multiple
|
few exceptions noted below). An individual statement can span multiple
|
||||||
lines.
|
lines.
|
||||||
|
|
||||||
All statements (except those contained within a function, hook, or event
|
|
||||||
handler) must appear after all global declarations.
|
|
||||||
|
|
||||||
Here are the statements that the Bro scripting language supports.
|
Here are the statements that the Bro scripting language supports.
|
||||||
|
|
||||||
.. bro:keyword:: add
|
.. bro:keyword:: add
|
||||||
|
|
|
@ -340,15 +340,18 @@ Here is a more detailed description of each type:
|
||||||
|
|
||||||
table [ type^+ ] of type
|
table [ type^+ ] of type
|
||||||
|
|
||||||
where *type^+* is one or more types, separated by commas.
|
where *type^+* is one or more types, separated by commas. The
|
||||||
For example:
|
index type cannot be any of the following types: pattern, table, set,
|
||||||
|
vector, file, opaque, any.
|
||||||
|
|
||||||
|
Here is an example of declaring a table indexed by "count" values
|
||||||
|
and yielding "string" values:
|
||||||
|
|
||||||
.. code:: bro
|
.. code:: bro
|
||||||
|
|
||||||
global a: table[count] of string;
|
global a: table[count] of string;
|
||||||
|
|
||||||
declares a table indexed by "count" values and yielding
|
The yield type can also be more complex:
|
||||||
"string" values. The yield type can also be more complex:
|
|
||||||
|
|
||||||
.. code:: bro
|
.. code:: bro
|
||||||
|
|
||||||
|
@ -441,7 +444,9 @@ Here is a more detailed description of each type:
|
||||||
|
|
||||||
set [ type^+ ]
|
set [ type^+ ]
|
||||||
|
|
||||||
where *type^+* is one or more types separated by commas.
|
where *type^+* is one or more types separated by commas. The
|
||||||
|
index type cannot be any of the following types: pattern, table, set,
|
||||||
|
vector, file, opaque, any.
|
||||||
|
|
||||||
Sets can be initialized by listing elements enclosed by curly braces:
|
Sets can be initialized by listing elements enclosed by curly braces:
|
||||||
|
|
||||||
|
|
|
@ -4,7 +4,7 @@ type Service: record {
|
||||||
rfc: count;
|
rfc: count;
|
||||||
};
|
};
|
||||||
|
|
||||||
function print_service(serv: Service): string
|
function print_service(serv: Service)
|
||||||
{
|
{
|
||||||
print fmt("Service: %s(RFC%d)",serv$name, serv$rfc);
|
print fmt("Service: %s(RFC%d)",serv$name, serv$rfc);
|
||||||
|
|
||||||
|
|
|
@ -9,7 +9,7 @@ type System: record {
|
||||||
services: set[Service];
|
services: set[Service];
|
||||||
};
|
};
|
||||||
|
|
||||||
function print_service(serv: Service): string
|
function print_service(serv: Service)
|
||||||
{
|
{
|
||||||
print fmt(" Service: %s(RFC%d)",serv$name, serv$rfc);
|
print fmt(" Service: %s(RFC%d)",serv$name, serv$rfc);
|
||||||
|
|
||||||
|
@ -17,7 +17,7 @@ function print_service(serv: Service): string
|
||||||
print fmt(" port: %s", p);
|
print fmt(" port: %s", p);
|
||||||
}
|
}
|
||||||
|
|
||||||
function print_system(sys: System): string
|
function print_system(sys: System)
|
||||||
{
|
{
|
||||||
print fmt("System: %s", sys$name);
|
print fmt("System: %s", sys$name);
|
||||||
|
|
||||||
|
|
|
@ -1,18 +1,25 @@
|
||||||
##! The input framework provides a way to read previously stored data either
|
##! The input framework provides a way to read previously stored data either
|
||||||
##! as an event stream or into a bro table.
|
##! as an event stream or into a Bro table.
|
||||||
|
|
||||||
module Input;
|
module Input;
|
||||||
|
|
||||||
export {
|
export {
|
||||||
type Event: enum {
|
type Event: enum {
|
||||||
|
## New data has been imported.
|
||||||
EVENT_NEW = 0,
|
EVENT_NEW = 0,
|
||||||
|
## Existing data has been changed.
|
||||||
EVENT_CHANGED = 1,
|
EVENT_CHANGED = 1,
|
||||||
|
## Previously existing data has been removed.
|
||||||
EVENT_REMOVED = 2,
|
EVENT_REMOVED = 2,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
## Type that defines the input stream read mode.
|
||||||
type Mode: enum {
|
type Mode: enum {
|
||||||
|
## Do not automatically reread the file after it has been read.
|
||||||
MANUAL = 0,
|
MANUAL = 0,
|
||||||
|
## Reread the entire file each time a change is found.
|
||||||
REREAD = 1,
|
REREAD = 1,
|
||||||
|
## Read data from end of file each time new data is appended.
|
||||||
STREAM = 2
|
STREAM = 2
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -24,20 +31,20 @@ export {
|
||||||
|
|
||||||
## Separator between fields.
|
## Separator between fields.
|
||||||
## Please note that the separator has to be exactly one character long.
|
## Please note that the separator has to be exactly one character long.
|
||||||
## Can be overwritten by individual writers.
|
## Individual readers can use a different value.
|
||||||
const separator = "\t" &redef;
|
const separator = "\t" &redef;
|
||||||
|
|
||||||
## Separator between set elements.
|
## Separator between set elements.
|
||||||
## Please note that the separator has to be exactly one character long.
|
## Please note that the separator has to be exactly one character long.
|
||||||
## Can be overwritten by individual writers.
|
## Individual readers can use a different value.
|
||||||
const set_separator = "," &redef;
|
const set_separator = "," &redef;
|
||||||
|
|
||||||
## String to use for empty fields.
|
## String to use for empty fields.
|
||||||
## Can be overwritten by individual writers.
|
## Individual readers can use a different value.
|
||||||
const empty_field = "(empty)" &redef;
|
const empty_field = "(empty)" &redef;
|
||||||
|
|
||||||
## String to use for an unset &optional field.
|
## String to use for an unset &optional field.
|
||||||
## Can be overwritten by individual writers.
|
## Individual readers can use a different value.
|
||||||
const unset_field = "-" &redef;
|
const unset_field = "-" &redef;
|
||||||
|
|
||||||
## Flag that controls if the input framework accepts records
|
## Flag that controls if the input framework accepts records
|
||||||
|
@ -47,11 +54,11 @@ export {
|
||||||
## abort. Defaults to false (abort).
|
## abort. Defaults to false (abort).
|
||||||
const accept_unsupported_types = F &redef;
|
const accept_unsupported_types = F &redef;
|
||||||
|
|
||||||
## TableFilter description type used for the `table` method.
|
## A table input stream type used to send data to a Bro table.
|
||||||
type TableDescription: record {
|
type TableDescription: record {
|
||||||
# Common definitions for tables and events
|
# Common definitions for tables and events
|
||||||
|
|
||||||
## String that allows the reader to find the source.
|
## String that allows the reader to find the source of the data.
|
||||||
## For `READER_ASCII`, this is the filename.
|
## For `READER_ASCII`, this is the filename.
|
||||||
source: string;
|
source: string;
|
||||||
|
|
||||||
|
@ -61,7 +68,8 @@ export {
|
||||||
## Read mode to use for this stream.
|
## Read mode to use for this stream.
|
||||||
mode: Mode &default=default_mode;
|
mode: Mode &default=default_mode;
|
||||||
|
|
||||||
## Descriptive name. Used to remove a stream at a later time.
|
## Name of the input stream. This is used by some functions to
|
||||||
|
## manipulate the stream.
|
||||||
name: string;
|
name: string;
|
||||||
|
|
||||||
# Special definitions for tables
|
# Special definitions for tables
|
||||||
|
@ -73,31 +81,35 @@ export {
|
||||||
idx: any;
|
idx: any;
|
||||||
|
|
||||||
## Record that defines the values used as the elements of the table.
|
## Record that defines the values used as the elements of the table.
|
||||||
## If this is undefined, then *destination* has to be a set.
|
## If this is undefined, then *destination* must be a set.
|
||||||
val: any &optional;
|
val: any &optional;
|
||||||
|
|
||||||
## Defines if the value of the table is a record (default), or a single value.
|
## Defines if the value of the table is a record (default), or a single
|
||||||
## When this is set to false, then *val* can only contain one element.
|
## value. When this is set to false, then *val* can only contain one
|
||||||
|
## element.
|
||||||
want_record: bool &default=T;
|
want_record: bool &default=T;
|
||||||
|
|
||||||
## The event that is raised each time a value is added to, changed in or removed
|
## The event that is raised each time a value is added to, changed in,
|
||||||
## from the table. The event will receive an Input::Event enum as the first
|
## or removed from the table. The event will receive an
|
||||||
## argument, the *idx* record as the second argument and the value (record) as the
|
## Input::TableDescription as the first argument, an Input::Event
|
||||||
## third argument.
|
## enum as the second argument, the *idx* record as the third argument
|
||||||
ev: any &optional; # event containing idx, val as values.
|
## and the value (record) as the fourth argument.
|
||||||
|
ev: any &optional;
|
||||||
|
|
||||||
## Predicate function that can decide if an insertion, update or removal should
|
## Predicate function that can decide if an insertion, update or removal
|
||||||
## really be executed. Parameters are the same as for the event. If true is
|
## should really be executed. Parameters have same meaning as for the
|
||||||
## returned, the update is performed. If false is returned, it is skipped.
|
## event.
|
||||||
|
## If true is returned, the update is performed. If false is returned,
|
||||||
|
## it is skipped.
|
||||||
pred: function(typ: Input::Event, left: any, right: any): bool &optional;
|
pred: function(typ: Input::Event, left: any, right: any): bool &optional;
|
||||||
|
|
||||||
## A key/value table that will be passed on the reader.
|
## A key/value table that will be passed to the reader.
|
||||||
## Interpretation of the values is left to the writer, but
|
## Interpretation of the values is left to the reader, but
|
||||||
## usually they will be used for configuration purposes.
|
## usually they will be used for configuration purposes.
|
||||||
config: table[string] of string &default=table();
|
config: table[string] of string &default=table();
|
||||||
};
|
};
|
||||||
|
|
||||||
## EventFilter description type used for the `event` method.
|
## An event input stream type used to send input data to a Bro event.
|
||||||
type EventDescription: record {
|
type EventDescription: record {
|
||||||
# Common definitions for tables and events
|
# Common definitions for tables and events
|
||||||
|
|
||||||
|
@ -116,19 +128,26 @@ export {
|
||||||
|
|
||||||
# Special definitions for events
|
# Special definitions for events
|
||||||
|
|
||||||
## Record describing the fields to be retrieved from the source input.
|
## Record type describing the fields to be retrieved from the input
|
||||||
|
## source.
|
||||||
fields: any;
|
fields: any;
|
||||||
|
|
||||||
## If this is false, the event receives each value in fields as a separate argument.
|
## If this is false, the event receives each value in *fields* as a
|
||||||
## If this is set to true (default), the event receives all fields in a single record value.
|
## separate argument.
|
||||||
|
## If this is set to true (default), the event receives all fields in
|
||||||
|
## a single record value.
|
||||||
want_record: bool &default=T;
|
want_record: bool &default=T;
|
||||||
|
|
||||||
## The event that is raised each time a new line is received from the reader.
|
## The event that is raised each time a new line is received from the
|
||||||
## The event will receive an Input::Event enum as the first element, and the fields as the following arguments.
|
## reader. The event will receive an Input::EventDescription record
|
||||||
|
## as the first argument, an Input::Event enum as the second
|
||||||
|
## argument, and the fields (as specified in *fields*) as the following
|
||||||
|
## arguments (this will either be a single record value containing
|
||||||
|
## all fields, or each field value as a separate argument).
|
||||||
ev: any;
|
ev: any;
|
||||||
|
|
||||||
## A key/value table that will be passed on the reader.
|
## A key/value table that will be passed to the reader.
|
||||||
## Interpretation of the values is left to the writer, but
|
## Interpretation of the values is left to the reader, but
|
||||||
## usually they will be used for configuration purposes.
|
## usually they will be used for configuration purposes.
|
||||||
config: table[string] of string &default=table();
|
config: table[string] of string &default=table();
|
||||||
};
|
};
|
||||||
|
@ -155,28 +174,29 @@ export {
|
||||||
## field will be the same value as the *source* field.
|
## field will be the same value as the *source* field.
|
||||||
name: string;
|
name: string;
|
||||||
|
|
||||||
## A key/value table that will be passed on the reader.
|
## A key/value table that will be passed to the reader.
|
||||||
## Interpretation of the values is left to the writer, but
|
## Interpretation of the values is left to the reader, but
|
||||||
## usually they will be used for configuration purposes.
|
## usually they will be used for configuration purposes.
|
||||||
config: table[string] of string &default=table();
|
config: table[string] of string &default=table();
|
||||||
};
|
};
|
||||||
|
|
||||||
## Create a new table input from a given source.
|
## Create a new table input stream from a given source.
|
||||||
##
|
##
|
||||||
## description: `TableDescription` record describing the source.
|
## description: `TableDescription` record describing the source.
|
||||||
##
|
##
|
||||||
## Returns: true on success.
|
## Returns: true on success.
|
||||||
global add_table: function(description: Input::TableDescription) : bool;
|
global add_table: function(description: Input::TableDescription) : bool;
|
||||||
|
|
||||||
## Create a new event input from a given source.
|
## Create a new event input stream from a given source.
|
||||||
##
|
##
|
||||||
## description: `EventDescription` record describing the source.
|
## description: `EventDescription` record describing the source.
|
||||||
##
|
##
|
||||||
## Returns: true on success.
|
## Returns: true on success.
|
||||||
global add_event: function(description: Input::EventDescription) : bool;
|
global add_event: function(description: Input::EventDescription) : bool;
|
||||||
|
|
||||||
## Create a new file analysis input from a given source. Data read from
|
## Create a new file analysis input stream from a given source. Data read
|
||||||
## the source is automatically forwarded to the file analysis framework.
|
## from the source is automatically forwarded to the file analysis
|
||||||
|
## framework.
|
||||||
##
|
##
|
||||||
## description: A record describing the source.
|
## description: A record describing the source.
|
||||||
##
|
##
|
||||||
|
@ -199,7 +219,11 @@ export {
|
||||||
|
|
||||||
## Event that is called when the end of a data source has been reached,
|
## Event that is called when the end of a data source has been reached,
|
||||||
## including after an update.
|
## including after an update.
|
||||||
global end_of_data: event(name: string, source:string);
|
##
|
||||||
|
## name: Name of the input stream.
|
||||||
|
##
|
||||||
|
## source: String that identifies the data source (such as the filename).
|
||||||
|
global end_of_data: event(name: string, source: string);
|
||||||
}
|
}
|
||||||
|
|
||||||
@load base/bif/input.bif
|
@load base/bif/input.bif
|
||||||
|
|
|
@ -11,7 +11,9 @@ export {
|
||||||
##
|
##
|
||||||
## name: name of the input stream.
|
## name: name of the input stream.
|
||||||
## source: source of the input stream.
|
## source: source of the input stream.
|
||||||
## exit_code: exit code of the program, or number of the signal that forced the program to exit.
|
## exit_code: exit code of the program, or number of the signal that forced
|
||||||
## signal_exit: false when program exited normally, true when program was forced to exit by a signal.
|
## the program to exit.
|
||||||
|
## signal_exit: false when program exited normally, true when program was
|
||||||
|
## forced to exit by a signal.
|
||||||
global process_finished: event(name: string, source:string, exit_code:count, signal_exit:bool);
|
global process_finished: event(name: string, source:string, exit_code:count, signal_exit:bool);
|
||||||
}
|
}
|
||||||
|
|
|
@ -349,7 +349,7 @@ type connection: record {
|
||||||
## The outer VLAN, if applicable, for this connection.
|
## The outer VLAN, if applicable, for this connection.
|
||||||
vlan: int &optional;
|
vlan: int &optional;
|
||||||
|
|
||||||
## The VLAN vlan, if applicable, for this connection.
|
## The inner VLAN, if applicable, for this connection.
|
||||||
inner_vlan: int &optional;
|
inner_vlan: int &optional;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
|
@ -87,8 +87,8 @@ export {
|
||||||
## f packet with FIN bit set
|
## f packet with FIN bit set
|
||||||
## r packet with RST bit set
|
## r packet with RST bit set
|
||||||
## c packet with a bad checksum
|
## c packet with a bad checksum
|
||||||
## i inconsistent packet (FIN+RST bits both set)
|
## i inconsistent packet (e.g. FIN+RST bits set)
|
||||||
## q multi-flag packet (SYN+FIN or SYN+RST bits both set)
|
## q multi-flag packet (SYN+FIN or SYN+RST bits set)
|
||||||
## ====== ====================================================
|
## ====== ====================================================
|
||||||
##
|
##
|
||||||
## If the event comes from the originator, the letter is in
|
## If the event comes from the originator, the letter is in
|
||||||
|
|
37
src/bro.bif
37
src/bro.bif
|
@ -2725,13 +2725,12 @@ function hexstr_to_bytestring%(hexstr: string%): string
|
||||||
##
|
##
|
||||||
## s: The string to encode.
|
## s: The string to encode.
|
||||||
##
|
##
|
||||||
## a: An optional custom alphabet. The empty string indicates the default alphabet.
|
## a: An optional custom alphabet. The empty string indicates the default
|
||||||
## If given, the length of *a* must be 64. For example, a custom alphabet could be
|
## alphabet. If given, the string must consist of 64 unique characters.
|
||||||
## ``"!#$%&/(),-.:;<>@[]^ `_{|}~abcdefghijklmnopqrstuvwxyz0123456789+?"``.
|
|
||||||
##
|
##
|
||||||
## Returns: The encoded version of *s*.
|
## Returns: The encoded version of *s*.
|
||||||
##
|
##
|
||||||
## .. bro:see:: decode_base64 decode_base64_conn
|
## .. bro:see:: decode_base64
|
||||||
function encode_base64%(s: string, a: string &default=""%): string
|
function encode_base64%(s: string, a: string &default=""%): string
|
||||||
%{
|
%{
|
||||||
BroString* t = encode_base64(s->AsString(), a->AsString());
|
BroString* t = encode_base64(s->AsString(), a->AsString());
|
||||||
|
@ -2749,13 +2748,12 @@ function encode_base64%(s: string, a: string &default=""%): string
|
||||||
##
|
##
|
||||||
## s: The string to encode.
|
## s: The string to encode.
|
||||||
##
|
##
|
||||||
## a: An optional custom alphabet. The empty string indicates the default alphabet.
|
## a: The custom alphabet. The string must consist of 64 unique
|
||||||
## If given, the length of *a* must be 64. For example, a custom alphabet could be
|
## characters. The empty string indicates the default alphabet.
|
||||||
## ``"!#$%&/(),-.:;<>@[]^ `_{|}~abcdefghijklmnopqrstuvwxyz0123456789+?"``.
|
|
||||||
##
|
##
|
||||||
## Returns: The encoded version of *s*.
|
## Returns: The encoded version of *s*.
|
||||||
##
|
##
|
||||||
## .. bro:see:: encode_base64 decode_base64 decode_base64_conn
|
## .. bro:see:: encode_base64
|
||||||
function encode_base64_custom%(s: string, a: string%): string &deprecated
|
function encode_base64_custom%(s: string, a: string%): string &deprecated
|
||||||
%{
|
%{
|
||||||
BroString* t = encode_base64(s->AsString(), a->AsString());
|
BroString* t = encode_base64(s->AsString(), a->AsString());
|
||||||
|
@ -2772,13 +2770,12 @@ function encode_base64_custom%(s: string, a: string%): string &deprecated
|
||||||
##
|
##
|
||||||
## s: The Base64-encoded string.
|
## s: The Base64-encoded string.
|
||||||
##
|
##
|
||||||
## a: An optional custom alphabet. The empty string indicates the default alphabet.
|
## a: An optional custom alphabet. The empty string indicates the default
|
||||||
## If given, the length of *a* must be 64. For example, a custom alphabet could be
|
## alphabet. If given, the string must consist of 64 unique characters.
|
||||||
## ``"!#$%&/(),-.:;<>@[]^ `_{|}~abcdefghijklmnopqrstuvwxyz0123456789+?"``.
|
|
||||||
##
|
##
|
||||||
## Returns: The decoded version of *s*.
|
## Returns: The decoded version of *s*.
|
||||||
##
|
##
|
||||||
## .. bro:see:: decode_base64_intern encode_base64
|
## .. bro:see:: decode_base64_conn encode_base64
|
||||||
function decode_base64%(s: string, a: string &default=""%): string
|
function decode_base64%(s: string, a: string &default=""%): string
|
||||||
%{
|
%{
|
||||||
BroString* t = decode_base64(s->AsString(), a->AsString());
|
BroString* t = decode_base64(s->AsString(), a->AsString());
|
||||||
|
@ -2793,19 +2790,18 @@ function decode_base64%(s: string, a: string &default=""%): string
|
||||||
|
|
||||||
## Decodes a Base64-encoded string that was derived from processing a connection.
|
## Decodes a Base64-encoded string that was derived from processing a connection.
|
||||||
## If an error is encountered decoding the string, that will be logged to
|
## If an error is encountered decoding the string, that will be logged to
|
||||||
## ``weird.log`` with the associated connection,
|
## ``weird.log`` with the associated connection.
|
||||||
##
|
##
|
||||||
## cid: The identifier of the connection that the encoding originates from.
|
## cid: The identifier of the connection that the encoding originates from.
|
||||||
##
|
##
|
||||||
## s: The Base64-encoded string.
|
## s: The Base64-encoded string.
|
||||||
##
|
##
|
||||||
## a: An optional custom alphabet. The empty string indicates the default alphabet.
|
## a: An optional custom alphabet. The empty string indicates the default
|
||||||
## If given, the length of *a* must be 64. For example, a custom alphabet could be
|
## alphabet. If given, the string must consist of 64 unique characters.
|
||||||
## ``"!#$%&/(),-.:;<>@[]^ `_{|}~abcdefghijklmnopqrstuvwxyz0123456789+?"``.
|
|
||||||
##
|
##
|
||||||
## Returns: The decoded version of *s*.
|
## Returns: The decoded version of *s*.
|
||||||
##
|
##
|
||||||
## .. bro:see:: decode_base64 encode_base64_intern
|
## .. bro:see:: decode_base64
|
||||||
function decode_base64_conn%(cid: conn_id, s: string, a: string &default=""%): string
|
function decode_base64_conn%(cid: conn_id, s: string, a: string &default=""%): string
|
||||||
%{
|
%{
|
||||||
Connection* conn = sessions->FindConnection(cid);
|
Connection* conn = sessions->FindConnection(cid);
|
||||||
|
@ -2829,13 +2825,12 @@ function decode_base64_conn%(cid: conn_id, s: string, a: string &default=""%): s
|
||||||
##
|
##
|
||||||
## s: The Base64-encoded string.
|
## s: The Base64-encoded string.
|
||||||
##
|
##
|
||||||
## a: The custom alphabet. The empty string indicates the default alphabet. The
|
## a: The custom alphabet. The string must consist of 64 unique characters.
|
||||||
## length of *a* must be 64. For example, a custom alphabet could be
|
## The empty string indicates the default alphabet.
|
||||||
## ``"!#$%&/(),-.:;<>@[]^ `_{|}~abcdefghijklmnopqrstuvwxyz0123456789+?"``.
|
|
||||||
##
|
##
|
||||||
## Returns: The decoded version of *s*.
|
## Returns: The decoded version of *s*.
|
||||||
##
|
##
|
||||||
## .. bro:see:: decode_base64 decode_base64_conn encode_base64
|
## .. bro:see:: decode_base64 decode_base64_conn
|
||||||
function decode_base64_custom%(s: string, a: string%): string &deprecated
|
function decode_base64_custom%(s: string, a: string%): string &deprecated
|
||||||
%{
|
%{
|
||||||
BroString* t = decode_base64(s->AsString(), a->AsString());
|
BroString* t = decode_base64(s->AsString(), a->AsString());
|
||||||
|
|
|
@ -2,7 +2,6 @@
|
||||||
|
|
||||||
connecting-connector.bro
|
connecting-connector.bro
|
||||||
|
|
||||||
|
|
||||||
const broker_port: port = 9999/tcp &redef;
|
const broker_port: port = 9999/tcp &redef;
|
||||||
redef exit_only_after_terminate = T;
|
redef exit_only_after_terminate = T;
|
||||||
redef BrokerComm::endpoint_name = "connector";
|
redef BrokerComm::endpoint_name = "connector";
|
||||||
|
|
|
@ -2,7 +2,6 @@
|
||||||
|
|
||||||
connecting-listener.bro
|
connecting-listener.bro
|
||||||
|
|
||||||
|
|
||||||
const broker_port: port = 9999/tcp &redef;
|
const broker_port: port = 9999/tcp &redef;
|
||||||
redef exit_only_after_terminate = T;
|
redef exit_only_after_terminate = T;
|
||||||
redef BrokerComm::endpoint_name = "listener";
|
redef BrokerComm::endpoint_name = "listener";
|
||||||
|
|
|
@ -2,7 +2,6 @@
|
||||||
|
|
||||||
events-listener.bro
|
events-listener.bro
|
||||||
|
|
||||||
|
|
||||||
const broker_port: port = 9999/tcp &redef;
|
const broker_port: port = 9999/tcp &redef;
|
||||||
redef exit_only_after_terminate = T;
|
redef exit_only_after_terminate = T;
|
||||||
redef BrokerComm::endpoint_name = "listener";
|
redef BrokerComm::endpoint_name = "listener";
|
||||||
|
|
|
@ -2,7 +2,6 @@
|
||||||
|
|
||||||
printing-listener.bro
|
printing-listener.bro
|
||||||
|
|
||||||
|
|
||||||
const broker_port: port = 9999/tcp &redef;
|
const broker_port: port = 9999/tcp &redef;
|
||||||
redef exit_only_after_terminate = T;
|
redef exit_only_after_terminate = T;
|
||||||
redef BrokerComm::endpoint_name = "listener";
|
redef BrokerComm::endpoint_name = "listener";
|
||||||
|
|
|
@ -2,7 +2,6 @@
|
||||||
|
|
||||||
testlog.bro
|
testlog.bro
|
||||||
|
|
||||||
|
|
||||||
module Test;
|
module Test;
|
||||||
|
|
||||||
export {
|
export {
|
||||||
|
|
|
@ -8,7 +8,7 @@ type Service: record {
|
||||||
rfc: count;
|
rfc: count;
|
||||||
};
|
};
|
||||||
|
|
||||||
function print_service(serv: Service): string
|
function print_service(serv: Service)
|
||||||
{
|
{
|
||||||
print fmt("Service: %s(RFC%d)",serv$name, serv$rfc);
|
print fmt("Service: %s(RFC%d)",serv$name, serv$rfc);
|
||||||
|
|
||||||
|
|
|
@ -13,7 +13,7 @@ type System: record {
|
||||||
services: set[Service];
|
services: set[Service];
|
||||||
};
|
};
|
||||||
|
|
||||||
function print_service(serv: Service): string
|
function print_service(serv: Service)
|
||||||
{
|
{
|
||||||
print fmt(" Service: %s(RFC%d)",serv$name, serv$rfc);
|
print fmt(" Service: %s(RFC%d)",serv$name, serv$rfc);
|
||||||
|
|
||||||
|
@ -21,7 +21,7 @@ function print_service(serv: Service): string
|
||||||
print fmt(" port: %s", p);
|
print fmt(" port: %s", p);
|
||||||
}
|
}
|
||||||
|
|
||||||
function print_system(sys: System): string
|
function print_system(sys: System)
|
||||||
{
|
{
|
||||||
print fmt("System: %s", sys$name);
|
print fmt("System: %s", sys$name);
|
||||||
|
|
||||||
|
|
|
@ -2,7 +2,6 @@
|
||||||
|
|
||||||
connecting-connector.bro
|
connecting-connector.bro
|
||||||
|
|
||||||
|
|
||||||
const broker_port: port = 9999/tcp &redef;
|
const broker_port: port = 9999/tcp &redef;
|
||||||
redef exit_only_after_terminate = T;
|
redef exit_only_after_terminate = T;
|
||||||
redef BrokerComm::endpoint_name = "connector";
|
redef BrokerComm::endpoint_name = "connector";
|
||||||
|
|
|
@ -2,7 +2,6 @@
|
||||||
|
|
||||||
connecting-listener.bro
|
connecting-listener.bro
|
||||||
|
|
||||||
|
|
||||||
const broker_port: port = 9999/tcp &redef;
|
const broker_port: port = 9999/tcp &redef;
|
||||||
redef exit_only_after_terminate = T;
|
redef exit_only_after_terminate = T;
|
||||||
redef BrokerComm::endpoint_name = "listener";
|
redef BrokerComm::endpoint_name = "listener";
|
||||||
|
|
|
@ -2,7 +2,6 @@
|
||||||
|
|
||||||
events-listener.bro
|
events-listener.bro
|
||||||
|
|
||||||
|
|
||||||
const broker_port: port = 9999/tcp &redef;
|
const broker_port: port = 9999/tcp &redef;
|
||||||
redef exit_only_after_terminate = T;
|
redef exit_only_after_terminate = T;
|
||||||
redef BrokerComm::endpoint_name = "listener";
|
redef BrokerComm::endpoint_name = "listener";
|
||||||
|
|
|
@ -2,7 +2,6 @@
|
||||||
|
|
||||||
printing-listener.bro
|
printing-listener.bro
|
||||||
|
|
||||||
|
|
||||||
const broker_port: port = 9999/tcp &redef;
|
const broker_port: port = 9999/tcp &redef;
|
||||||
redef exit_only_after_terminate = T;
|
redef exit_only_after_terminate = T;
|
||||||
redef BrokerComm::endpoint_name = "listener";
|
redef BrokerComm::endpoint_name = "listener";
|
||||||
|
|
|
@ -2,7 +2,6 @@
|
||||||
|
|
||||||
testlog.bro
|
testlog.bro
|
||||||
|
|
||||||
|
|
||||||
module Test;
|
module Test;
|
||||||
|
|
||||||
export {
|
export {
|
||||||
|
|
|
@ -8,7 +8,7 @@ type Service: record {
|
||||||
rfc: count;
|
rfc: count;
|
||||||
};
|
};
|
||||||
|
|
||||||
function print_service(serv: Service): string
|
function print_service(serv: Service)
|
||||||
{
|
{
|
||||||
print fmt("Service: %s(RFC%d)",serv$name, serv$rfc);
|
print fmt("Service: %s(RFC%d)",serv$name, serv$rfc);
|
||||||
|
|
||||||
|
|
|
@ -13,7 +13,7 @@ type System: record {
|
||||||
services: set[Service];
|
services: set[Service];
|
||||||
};
|
};
|
||||||
|
|
||||||
function print_service(serv: Service): string
|
function print_service(serv: Service)
|
||||||
{
|
{
|
||||||
print fmt(" Service: %s(RFC%d)",serv$name, serv$rfc);
|
print fmt(" Service: %s(RFC%d)",serv$name, serv$rfc);
|
||||||
|
|
||||||
|
@ -21,7 +21,7 @@ function print_service(serv: Service): string
|
||||||
print fmt(" port: %s", p);
|
print fmt(" port: %s", p);
|
||||||
}
|
}
|
||||||
|
|
||||||
function print_system(sys: System): string
|
function print_system(sys: System)
|
||||||
{
|
{
|
||||||
print fmt("System: %s", sys$name);
|
print fmt("System: %s", sys$name);
|
||||||
|
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue