Merge branch 'master' into topic/jsiwek/broxygen
commit 15b1904ca8
155 changed files with 1502 additions and 721 deletions
CHANGES | 30

@@ -1,4 +1,34 @@
+2.2-beta-114 | 2013-10-18 14:17:57 -0700
+
+  * Moving the SQLite examples into separate Bro files to turn them
+    into sphinx-btest tests. (Robin Sommer)
+
+2.2-beta-112 | 2013-10-18 13:47:13 -0700
+
+  * A larger chunk of documentation fixes and cleanup. (Daniel Thayer)
+
+    Apart from many smaller improvements this includes in particular:
+
+    * Add README files for most Bro frameworks and base/protocols.
+
+    * Update installation instructions.
+
+    * Improvements to file analysis docs and conversion to using
+      btest sphinx.
+
+2.2-beta-80 | 2013-10-18 13:18:05 -0700
+
+  * SQLite reader/writer documentation. (Bernhard Amann)
+
+  * Check that the SQLite reader is only used in MANUAL reading mode.
+    (Bernhard Amann)
+
+  * Rename the SQLite writer "dbname" configuration option to
+    "tablename". (Bernhard Amann)
+
+  * Remove the "dbname" configuration option from the SQLite reader as
+    it wasn't used there. (Bernhard Amann)
+
 2.2-beta-73 | 2013-10-14 14:28:25 -0700
 
   * Fix misc. Coverity-reported issues (leaks, potential null pointer
VERSION | 2

@@ -1 +1 @@
-2.2-beta-73
+2.2-beta-114
(submodule update)

@@ -1 +1 @@
-Subproject commit 2ccc76eee5565b7142e10f9b625a05e9932f459f
+Subproject commit e8eda204f418c78cc35102db04602ad2ea94aff8
doc/frameworks/file_analysis.rst

@@ -31,40 +31,13 @@ some information about the file such as which network
 :bro:see:`connection` and protocol are transporting the file, how many
 bytes have been transferred so far, and its MIME type.
 
-.. code:: bro
+Here's a simple example:
 
-    event connection_state_remove(c: connection)
-        {
-        print "connection_state_remove";
-        print c$uid;
-        print c$id;
-        for ( s in c$service )
-            print s;
-        }
-
-    event file_state_remove(f: fa_file)
-        {
-        print "file_state_remove";
-        print f$id;
-        for ( cid in f$conns )
-            {
-            print f$conns[cid]$uid;
-            print cid;
-            }
-        print f$source;
-        }
-
-might give output like::
-
-    file_state_remove
-    Cx92a0ym5R8
-    REs2LQfVW2j
-    [orig_h=10.0.0.7, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp]
-    HTTP
-    connection_state_remove
-    REs2LQfVW2j
-    [orig_h=10.0.0.7, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp]
-    HTTP
+.. btest-include:: ${DOC_ROOT}/frameworks/file_analysis_01.bro
+
+.. btest:: file-analysis-01
+
+    @TEST-EXEC: btest-rst-cmd bro -r ${TRACES}/http/get.trace ${DOC_ROOT}/frameworks/file_analysis_01.bro
 
 This doesn't perform any interesting analysis yet, but does highlight
 the similarity between analysis of connections and files. Connections
@@ -90,27 +63,16 @@ will write the contents of the file out to the local file system).
 In the future there may be file analyzers that automatically attach to
 files based on heuristics, similar to the Dynamic Protocol Detection
 (DPD) framework for connections, but many will always require an
-explicit attachment decision:
+explicit attachment decision.
 
-.. code:: bro
+Here's a simple example of how to use the MD5 file analyzer to
+calculate the MD5 of plain text files:
 
-    event file_new(f: fa_file)
-        {
-        print "new file", f$id;
-        if ( f?$mime_type && f$mime_type == "text/plain" )
-            Files::add_analyzer(f, Files::ANALYZER_MD5);
-        }
-
-    event file_hash(f: fa_file, kind: string, hash: string)
-        {
-        print "file_hash", f$id, kind, hash;
-        }
-
-this script calculates MD5s for all plain text files and might give
-output::
-
-    new file, Cx92a0ym5R8
-    file_hash, Cx92a0ym5R8, md5, 397168fd09991a0e712254df7bc639ac
+.. btest-include:: ${DOC_ROOT}/frameworks/file_analysis_02.bro
+
+.. btest:: file-analysis-02
+
+    @TEST-EXEC: btest-rst-cmd bro -r ${TRACES}/http/get.trace ${DOC_ROOT}/frameworks/file_analysis_02.bro
 
 Some file analyzers might have tunable parameters that need to be
 specified in the call to :bro:see:`Files::add_analyzer`:
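The hunk ends here, so the parameterized call the document goes on to show is
not part of it. As a hedged illustration only, the file-extraction analyzer is
the usual example of an analyzer that takes arguments; the field name follows
the ``Files::AnalyzerArgs`` record documented elsewhere in this commit:

.. code:: bro

    event file_new(f: fa_file)
        {
        # Analyzer-specific parameters travel in an AnalyzerArgs record;
        # extract_filename is the tunable parameter of the extraction
        # analyzer (the filename here is an arbitrary example).
        Files::add_analyzer(f, Files::ANALYZER_EXTRACT,
                            [$extract_filename=fmt("extract-%s", f$id)]);
        }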
@@ -144,41 +106,19 @@ in the same way it analyzes files that it sees coming over traffic from
 a network interface it's monitoring. It only requires a call to
 :bro:see:`Input::add_analysis`:
 
-.. code:: bro
-
-    redef exit_only_after_terminate = T;
-
-    event file_new(f: fa_file)
-        {
-        print "new file", f$id;
-        Files::add_analyzer(f, Files::ANALYZER_MD5);
-        }
-
-    event file_state_remove(f: fa_file)
-        {
-        Input::remove(f$source);
-        terminate();
-        }
-
-    event file_hash(f: fa_file, kind: string, hash: string)
-        {
-        print "file_hash", f$id, kind, hash;
-        }
-
-    event bro_init()
-        {
-        local source: string = "./myfile";
-        Input::add_analysis([$source=source, $name=source]);
-        }
+.. btest-include:: ${DOC_ROOT}/frameworks/file_analysis_03.bro
 
 Note that the "source" field of :bro:see:`fa_file` corresponds to the
 "name" field of :bro:see:`Input::AnalysisDescription` since that is what
 the input framework uses to uniquely identify an input stream.
 
-The output of the above script may be::
+The output of the above script may be (assuming a file called "myfile"
+exists):
 
-    new file, G1fS2xthS4l
-    file_hash, G1fS2xthS4l, md5, 54098b367d2e87b078671fad4afb9dbb
+.. btest:: file-analysis-03
+
+    @TEST-EXEC: echo "Hello world" > myfile
+    @TEST-EXEC: btest-rst-cmd bro ${DOC_ROOT}/frameworks/file_analysis_03.bro
 
 Nothing that special, but it at least verifies the MD5 file analyzer
 saw all the bytes of the input file and calculated the checksum
doc/frameworks/file_analysis_01.bro | 20 (new file)

@@ -0,0 +1,20 @@
event connection_state_remove(c: connection)
    {
    print "connection_state_remove";
    print c$uid;
    print c$id;
    for ( s in c$service )
        print s;
    }

event file_state_remove(f: fa_file)
    {
    print "file_state_remove";
    print f$id;
    for ( cid in f$conns )
        {
        print f$conns[cid]$uid;
        print cid;
        }
    print f$source;
    }
doc/frameworks/file_analysis_02.bro | 11 (new file)

@@ -0,0 +1,11 @@
event file_new(f: fa_file)
    {
    print "new file", f$id;
    if ( f?$mime_type && f$mime_type == "text/plain" )
        Files::add_analyzer(f, Files::ANALYZER_MD5);
    }

event file_hash(f: fa_file, kind: string, hash: string)
    {
    print "file_hash", f$id, kind, hash;
    }
doc/frameworks/file_analysis_03.bro | 25 (new file)

@@ -0,0 +1,25 @@
redef exit_only_after_terminate = T;

event file_new(f: fa_file)
    {
    print "new file", f$id;
    Files::add_analyzer(f, Files::ANALYZER_MD5);
    }

event file_state_remove(f: fa_file)
    {
    print "file_state_remove";
    Input::remove(f$source);
    terminate();
    }

event file_hash(f: fa_file, kind: string, hash: string)
    {
    print "file_hash", f$id, kind, hash;
    }

event bro_init()
    {
    local source: string = "./myfile";
    Input::add_analysis([$source=source, $name=source]);
    }
doc/frameworks/geoip.rst

@@ -11,10 +11,41 @@ GeoLocation
 to find the geographic location for an IP address. Bro has support
 for the `GeoIP library <http://www.maxmind.com/app/c>`__ at the
 policy script level beginning with release 1.3 to account for this
-need.
+need. To use this functionality, you need to first install the libGeoIP
+software, and then install the GeoLite city database before building
+Bro.
 
 .. contents::
 
+Install libGeoIP
+----------------
+
+* FreeBSD:
+
+  .. console::
+
+      sudo pkg_add -r GeoIP
+
+* RPM/RedHat-based Linux:
+
+  .. console::
+
+      sudo yum install GeoIP-devel
+
+* DEB/Debian-based Linux:
+
+  .. console::
+
+      sudo apt-get install libgeoip-dev
+
+* Mac OS X:
+
+  Vanilla OS X installations don't ship with libGeoIP, but if
+  installed from your preferred package management system (e.g.
+  MacPorts, Fink, or Homebrew), they should be automatically detected
+  and Bro will compile against them.
+
+
 GeoIPLite Database Installation
 ------------------------------------
 
@@ -22,39 +53,23 @@ A country database for GeoIPLite is included when you do the C API
 install, but for Bro, we are using the city database which includes
 cities and regions in addition to countries.
 
-`Download <http://www.maxmind.com/app/geolitecity>`__ the geolitecity
-binary database and follow the directions to install it.
-
-FreeBSD Quick Install
----------------------
+`Download <http://www.maxmind.com/app/geolitecity>`__ the GeoLite city
+binary database.
 
 .. console::
 
-    pkg_add -r GeoIP
     wget http://geolite.maxmind.com/download/geoip/database/GeoLiteCity.dat.gz
     gunzip GeoLiteCity.dat.gz
-    mv GeoLiteCity.dat /usr/local/share/GeoIP/GeoIPCity.dat
-
-    # Set your environment correctly before running Bro's configure script
-    export CFLAGS=-I/usr/local/include
-    export LDFLAGS=-L/usr/local/lib
 
-CentOS Quick Install
---------------------
+Next, the file needs to be put in the database directory. This directory
+should already exist and will vary depending on which platform and package
+you are using. For FreeBSD, use ``/usr/local/share/GeoIP``. For Linux,
+use ``/usr/share/GeoIP`` or ``/var/lib/GeoIP`` (choose whichever one
+already exists).
 
 .. console::
 
-    yum install GeoIP-devel
-    wget http://geolite.maxmind.com/download/geoip/database/GeoLiteCity.dat.gz
-    gunzip GeoLiteCity.dat.gz
-    mkdir -p /var/lib/GeoIP/
-    mv GeoLiteCity.dat /var/lib/GeoIP/GeoIPCity.dat
-
-    # Set your environment correctly before running Bro's configure script
-    export CFLAGS=-I/usr/local/include
-    export LDFLAGS=-L/usr/local/lib
+    mv GeoLiteCity.dat <path_to_database_dir>/GeoIPCity.dat
 
 
 Usage
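The Usage section itself lies outside this hunk. For orientation, geolocation
at the script level centers on the built-in ``lookup_location`` function; a
minimal hedged sketch (the address is an arbitrary example taken from
elsewhere in this commit):

.. code:: bro

    event bro_init()
        {
        # lookup_location() returns a geo_location record whose fields
        # (country_code, region, city, latitude, longitude) are all
        # optional, so test for presence before printing.
        local loc = lookup_location(192.150.187.43);
        if ( loc?$city )
            print loc$city;
        }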
doc/frameworks/input.rst

@@ -1,4 +1,6 @@
 
+.. _framework-input:
+
 ===============
 Input Framework
 ===============

@@ -260,8 +262,13 @@ to optimize the speed of the input framework. It can generate arbitrary
 amounts of semi-random data in all Bro data types supported by the input
 framework.
 
-In the future, the input framework will get support for new data sources
-like, for example, different databases.
+Currently, Bro supports the following readers in addition to the
+aforementioned ones:
+
+.. toctree::
+    :maxdepth: 1
+
+    logging-input-sqlite
 
 Add_table options
 -----------------
doc/frameworks/logging-elasticsearch.rst

@@ -31,12 +31,12 @@ Once extracted, start ElasticSearch with::
 
     # ./bin/elasticsearch
 
 For more detailed information, refer to the ElasticSearch installation
-documentation: http://www.elasticsearch.org/guide/reference/setup/installation.html
+documentation: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/setup.html
 
 Compiling Bro with ElasticSearch Support
 ----------------------------------------
 
-First, ensure that you have libcurl installed the run configure.::
+First, ensure that you have libcurl installed then run configure::
 
     # ./configure
     [...]

@@ -51,9 +51,9 @@ First, ensure that you have libcurl installed the run configure.::
 Activating ElasticSearch
 ------------------------
 
-The easiest way to enable ElasticSearch output is to load the tuning/logs-to-
-elasticsearch.bro script. If you are using BroControl, the following line in
-local.bro will enable it.
+The easiest way to enable ElasticSearch output is to load the
+tuning/logs-to-elasticsearch.bro script. If you are using BroControl,
+the following line in local.bro will enable it:
 
 .. console::
 

@@ -76,7 +76,7 @@ A common problem encountered with ElasticSearch is too many files being held
 open. The ElasticSearch website has some suggestions on how to increase the
 open file limit.
 
-- http://www.elasticsearch.org/tutorials/2011/04/06/too-many-open-files.html
+- http://www.elasticsearch.org/tutorials/too-many-open-files/
 
 TODO
 ----
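The console block's contents fall outside this hunk. For reference, loading
the script from local.bro is a single ``@load`` line, shown here as a sketch
rather than as part of the diff:

.. code:: bro

    @load tuning/logs-to-elasticsearch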
doc/frameworks/logging-input-sqlite.rst | 166 (new file)

@@ -0,0 +1,166 @@

============================================
Logging To and Reading From SQLite Databases
============================================

.. rst-class:: opening

    Starting with version 2.2, Bro features a SQLite logging writer
    as well as a SQLite input reader. SQLite is a simple, file-based,
    widely used SQL database system. Using SQLite allows Bro to write
    and access data in a format that is easy to exchange with
    other applications. Due to the transactional nature of SQLite,
    databases can be used by several applications simultaneously. Hence,
    they can, for example, be used to make data that changes regularly available
    to Bro on a continuing basis.

.. contents::

Warning
=======

In contrast to the ASCII reader and writer, the SQLite plugins have not yet
seen extensive use in production environments. While we are not aware
of any issues with them, we urge caution when using them
in production environments. There could be lingering issues which only occur
when the plugins are used with high amounts of data or in high-load environments.

Logging Data into SQLite Databases
==================================

Logging support for SQLite is available in all Bro installations starting with
version 2.2. There is no need to load any additional scripts or for any compile-time
configurations.

Sending data from existing logging streams to SQLite is rather straightforward. You
have to define a filter which specifies SQLite as the writer.

The following example code adds SQLite as a filter for the connection log:

.. btest-include:: ${DOC_ROOT}/frameworks/sqlite-conn-filter.bro

.. btest:: sqlite-conn-filter-check

    # Make sure this parses correctly at least.
    @TEST-EXEC: bro ${DOC_ROOT}/frameworks/sqlite-conn-filter.bro

Bro will create the database file ``/var/db/conn.sqlite``, if it does not already exist.
It will also create a table with the name ``conn`` (if it does not exist) and start
appending connection information to the table.

At the moment, SQLite databases are not rotated the same way ASCII log-files are. You
have to take care to create them in an adequate location.

If you examine the resulting SQLite database, the schema will contain the same fields
that are present in the ASCII log files::

    # sqlite3 /var/db/conn.sqlite

    SQLite version 3.8.0.2 2013-09-03 17:11:13
    Enter ".help" for instructions
    Enter SQL statements terminated with a ";"
    sqlite> .schema
    CREATE TABLE conn (
    'ts' double precision,
    'uid' text,
    'id.orig_h' text,
    'id.orig_p' integer,
    ...

Note that the ASCII ``conn.log`` will still be created. To disable the ASCII writer for a
log stream, you can remove the default filter:

.. code:: bro

    Log::remove_filter(Conn::LOG, "default");


To create a custom SQLite log file, you have to create a new log stream that contains
just the information you want to commit to the database. Please refer to the
:ref:`framework-logging` documentation on how to create custom log streams.

Reading Data from SQLite Databases
==================================

Like logging support, support for reading data from SQLite databases is built into Bro starting
with version 2.2.

Just as with the text-based input readers (please refer to the :ref:`framework-input`
documentation for them and for basic information on how to use the input framework), the SQLite reader
can be used to read data - in this case the result of SQL queries - into tables or into events.

Reading Data into Tables
------------------------

To read data from a SQLite database, we first have to provide Bro with information about how
the resulting data will be structured. For this example, we expect that we have a SQLite database
which contains host IP addresses and the user accounts that are allowed to log into a specific
machine.

The SQLite commands to create the schema are as follows::

    create table machines_to_users (
    host text unique not null,
    users text not null);

    insert into machines_to_users values ('192.168.17.1', 'bernhard,matthias,seth');
    insert into machines_to_users values ('192.168.17.2', 'bernhard');
    insert into machines_to_users values ('192.168.17.3', 'seth,matthias');

After creating a file called ``hosts.sqlite`` with this content, we can read the resulting table
into Bro:

.. btest-include:: ${DOC_ROOT}/frameworks/sqlite-read-table.bro

.. btest:: sqlite-read-table-check

    # Make sure this parses correctly at least.
    @TEST-EXEC: bro ${DOC_ROOT}/frameworks/sqlite-read-table.bro

Afterwards, that table can be used to check logins into hosts against the available
userlist.

Turning Data into Events
------------------------

The second mode is to use the SQLite reader to output the input data as events. Typically there
are two reasons to do this. First, when the structure of the input data is too complicated
for a direct table import. In this case, the data can be read into an event which can then
create the necessary data structures in Bro in scriptland.

The second reason is that the dataset is too big to hold in memory. In this case, the checks
can be performed on-demand, when Bro encounters a situation where it needs additional information.

An example of this would be a huge internal database with malware hashes. Live database queries
could be used to check the sporadically occurring downloads against the database.

The SQLite commands to create the schema are as follows::

    create table malware_hashes (
    hash text unique not null,
    description text not null);

    insert into malware_hashes values ('86f7e437faa5a7fce15d1ddcb9eaeaea377667b8', 'malware a');
    insert into malware_hashes values ('e9d71f5ee7c92d6dc9e92ffdad17b8bd49418f98', 'malware b');
    insert into malware_hashes values ('84a516841ba77a5b4648de2cd0dfcb30ea46dbb4', 'malware c');
    insert into malware_hashes values ('3c363836cf4e16666669a25da280a1865c2d2874', 'malware d');
    insert into malware_hashes values ('58e6b3a414a1e090dfc6029add0f3555ccba127f', 'malware e');
    insert into malware_hashes values ('4a0a19218e082a343a1b17e5333409af9d98f0f5', 'malware f');
    insert into malware_hashes values ('54fd1711209fb1c0781092374132c66e79e2241b', 'malware g');
    insert into malware_hashes values ('27d5482eebd075de44389774fce28c69f45c8a75', 'malware h');
    insert into malware_hashes values ('73f45106968ff8dc51fba105fa91306af1ff6666', 'ftp-trace');


The following code uses the file-analysis framework to get the sha1 hashes of files that are
transmitted over the network. For each hash, a SQL query is run against SQLite. If the query
returns with a result, we had a hit against our malware database and output the matching hash.

.. btest-include:: ${DOC_ROOT}/frameworks/sqlite-read-events.bro

.. btest:: sqlite-read-events-check

    # Make sure this parses correctly at least.
    @TEST-EXEC: bro ${DOC_ROOT}/frameworks/sqlite-read-events.bro

If you run this script against the trace in ``testing/btest/Traces/ftp/ipv4.trace``, you
will get one hit.
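As a hedged sketch of the custom-stream approach the new document describes
(the module name, record fields, and paths below are hypothetical examples,
not part of the commit):

.. code:: bro

    module Example;

    export {
        redef enum Log::ID += { LOG };

        type Info: record {
            ts: time &log;
            host: addr &log;
        };
    }

    event bro_init()
        {
        # Create a stream holding just the fields destined for the
        # database, drop its default ASCII filter, and attach a SQLite
        # filter like the one shown earlier.
        Log::create_stream(Example::LOG, [$columns=Info]);
        Log::remove_filter(Example::LOG, "default");
        Log::add_filter(Example::LOG, [$name="sqlite",
                                       $path="/var/db/example",
                                       $config=table(["tablename"] = "example"),
                                       $writer=Log::WRITER_SQLITE]);
        }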
doc/frameworks/logging.rst

@@ -387,3 +387,4 @@ Bro supports the following output formats other than ASCII:
 
     logging-dataseries
     logging-elasticsearch
+    logging-input-sqlite
doc/frameworks/sqlite-conn-filter.bro | 12 (new file)

@@ -0,0 +1,12 @@
event bro_init()
    {
    local filter: Log::Filter =
        [
        $name="sqlite",
        $path="/var/db/conn",
        $config=table(["tablename"] = "conn"),
        $writer=Log::WRITER_SQLITE
        ];

    Log::add_filter(Conn::LOG, filter);
    }
doc/frameworks/sqlite-read-events.bro | 40 (new file)

@@ -0,0 +1,40 @@
@load frameworks/files/hash-all-files

type Val: record {
    hash: string;
    description: string;
};

event line(description: Input::EventDescription, tpe: Input::Event, r: Val)
    {
    print fmt("malware-hit with hash %s, description %s", r$hash, r$description);
    }

global malware_source = "/var/db/malware";

event file_hash(f: fa_file, kind: string, hash: string)
    {

    # check all sha1 hashes
    if ( kind=="sha1" )
        {
        Input::add_event(
            [
            $source=malware_source,
            $name=hash,
            $fields=Val,
            $ev=line,
            $want_record=T,
            $config=table(
                ["query"] = fmt("select * from malware_hashes where hash='%s';", hash)
                ),
            $reader=Input::READER_SQLITE
            ]);
        }
    }

event Input::end_of_data(name: string, source: string)
    {
    if ( source == malware_source )
        Input::remove(name);
    }
doc/frameworks/sqlite-read-table.bro | 35 (new file)

@@ -0,0 +1,35 @@
type Idx: record {
    host: addr;
};

type Val: record {
    users: set[string];
};

global hostslist: table[addr] of Val = table();

event bro_init()
    {
    Input::add_table([$source="/var/db/hosts",
                      $name="hosts",
                      $idx=Idx,
                      $val=Val,
                      $destination=hostslist,
                      $reader=Input::READER_SQLITE,
                      $config=table(["query"] = "select * from machines_to_users;")
                      ]);

    Input::remove("hosts");
    }

event Input::end_of_data(name: string, source: string)
    {
    if ( name != "hosts" )
        return;

    # now all data is in the table
    print "Hosts list has been successfully imported";

    # List the users of one host.
    print hostslist[192.168.17.1]$users;
    }
doc/install/install.rst

@@ -32,6 +32,7 @@ before you begin:
 * Libmagic 5.04 or greater
 * Libz
 * Bash (for BroControl)
+* Python (for BroControl)
 
 To build Bro from source, the following additional dependencies are required:
 

@@ -47,7 +48,8 @@ To build Bro from source, the following additional dependencies are required:
 * zlib headers
 * Perl
 
-To install the required dependencies, you can use:
+To install the required dependencies, you can use (when done, make sure
+that ``bash`` and ``python`` are in your ``PATH``):
 
 * RPM/RedHat-based Linux:
 

@@ -68,11 +70,7 @@ To install the required dependencies, you can use:
 
   .. console::
 
-      sudo pkg_add -r bash cmake swig bison python
-
-  Note that ``bash`` needs to be in ``PATH``, which by default it is
-  not. The FreeBSD package installs the binary into
-  ``/usr/local/bin``.
+      sudo pkg_add -r bash cmake swig bison python perl
 
 * Mac OS X:
 

@@ -87,8 +85,8 @@ To install the required dependencies, you can use:
   preferred Mac OS X package management system (e.g. MacPorts_, Fink_,
   or Homebrew_).
 
-  Specifically for MacPorts, the ``swig``, ``swig-ruby``, ``swig-python``
-  and ``file`` packages provide the required dependencies.
+  Specifically for MacPorts, the ``cmake``, ``swig``, ``swig-ruby``,
+  ``swig-python`` and ``file`` packages provide the required dependencies.
 
 
 Optional Dependencies
|
||||||
build time:
|
build time:
|
||||||
|
|
||||||
* LibGeoIP (for geo-locating IP addresses)
|
* LibGeoIP (for geo-locating IP addresses)
|
||||||
|
* sendmail (enables Bro and BroControl to send mail)
|
||||||
* gperftools (tcmalloc is used to improve memory and CPU usage)
|
* gperftools (tcmalloc is used to improve memory and CPU usage)
|
||||||
* ipsumdump (for trace-summary; http://www.cs.ucla.edu/~kohler/ipsumdump)
|
* ipsumdump (for trace-summary; http://www.cs.ucla.edu/~kohler/ipsumdump)
|
||||||
* Ruby executable, library, and headers (for Broccoli Ruby bindings)
|
* Ruby executable, library, and headers (for Broccoli Ruby bindings)
|
||||||
|
|
||||||
LibGeoIP is probably the most interesting and can be easily installed
|
LibGeoIP is probably the most interesting and can be installed
|
||||||
on most platforms:
|
on most platforms by following the instructions for :ref:`installing
|
||||||
|
libGeoIP and the GeoIP database
|
||||||
* RedHat Enterprise Linux:
|
|
||||||
|
|
||||||
.. console::
|
|
||||||
|
|
||||||
sudo yum install geoip-devel sendmail
|
|
||||||
|
|
||||||
* CentOS Linux:
|
|
||||||
|
|
||||||
.. console::
|
|
||||||
|
|
||||||
sudo yum install GeoIP-devel sendmail
|
|
||||||
|
|
||||||
* DEB/Debian-based Linux:
|
|
||||||
|
|
||||||
.. console::
|
|
||||||
|
|
||||||
sudo apt-get install libgeoip-dev sendmail
|
|
||||||
|
|
||||||
* FreeBSD using ports:
|
|
||||||
|
|
||||||
.. console::
|
|
||||||
|
|
||||||
sudo pkg_add -r GeoIP
|
|
||||||
|
|
||||||
* Mac OS X:
|
|
||||||
|
|
||||||
Vanilla OS X installations don't ship with libGeoIP, but if
|
|
||||||
installed from your preferred package management system (e.g.
|
|
||||||
MacPorts, Fink, or Homebrew), they should be automatically detected
|
|
||||||
and Bro will compile against them.
|
|
||||||
|
|
||||||
Additional steps may be needed to :ref:`get the right GeoIP database
|
|
||||||
<geolocation>`.
|
<geolocation>`.
|
||||||
|
|
||||||
|
|
||||||
|
@ -217,7 +184,7 @@ turn off unwanted auxiliary projects that would otherwise be installed
|
||||||
automatically. Finally, use ``make install-aux`` to install some of
|
automatically. Finally, use ``make install-aux`` to install some of
|
||||||
the other programs that are in the ``aux/bro-aux`` directory.
|
the other programs that are in the ``aux/bro-aux`` directory.
|
||||||
|
|
||||||
OpenBSD users, please see our at `FAQ
|
OpenBSD users, please see our `FAQ
|
||||||
<http://www.bro.org/documentation/faq.html>`_ if you are having
|
<http://www.bro.org/documentation/faq.html>`_ if you are having
|
||||||
problems installing Bro.
|
problems installing Bro.
|
||||||
|
|
||||||
|
|
|
@@ -351,7 +351,7 @@ decrypted from HTTP streams is stored in
 excerpt from :doc:`/scripts/base/protocols/http/main` below.
 
 .. btest-include:: ${BRO_SRC_ROOT}/scripts/base/protocols/http/main.bro
-    :lines: 8-10,19,20,118
+    :lines: 8-10,19-21,120
 
 Because the constant was declared with the ``&redef`` attribute, if we
 needed to turn this option on globally, we could do so by adding the

@@ -810,7 +810,7 @@ example of the ``record`` data type in the earlier sections, the
 ``conn.log``, is shown by the excerpt below.
 
 .. btest-include:: ${BRO_SRC_ROOT}/scripts/base/protocols/conn/main.bro
-    :lines: 10-12,16,17,19,21,23,25,28,31,35,37,56,62,68,90,93,97,100,104,108,109,114
+    :lines: 10-12,16-17,19,21,23,25,28,31,35,38,57,63,69,92,95,99,102,106,110-111,116
 
 Looking at the structure of the definition, a new collection of data
 types is being defined as a type called ``Info``. Since this type
scripts/base/files/extract/main.bro

@@ -8,18 +8,21 @@ export {
    const prefix = "./extract_files/" &redef;
 
    ## The default max size for extracted files (they won't exceed this
-   ## number of bytes), unlimited.
+   ## number of bytes). A value of zero means unlimited.
    const default_limit = 0 &redef;
 
    redef record Files::Info += {
-       ## Local filenames of extracted file.
+       ## Local filename of extracted file.
        extracted: string &optional &log;
    };
 
    redef record Files::AnalyzerArgs += {
        ## The local filename to which to write an extracted file.
        ## This field is used in the core by the extraction plugin
-       ## to know where to write the file to. It's also optional
+       ## to know where to write the file to. If not specified, then
+       ## a filename in the format "extract-<source>-<id>" is
+       ## automatically assigned (using the *source* and *id*
+       ## fields of :bro:see:`fa_file`).
        extract_filename: string &optional;
        ## The maximum allowed file size in bytes of *extract_filename*.
        ## Once reached, a :bro:see:`file_extraction_limit` event is
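As a hedged usage sketch, both options above are &redef and can be tuned from
site policy (the values are arbitrary and the FileExtract module name is an
assumption about this script):

    # Write extracted files to a roomier location and cap each file
    # at 10 MB instead of the unlimited default.
    redef FileExtract::prefix = "/data/extract_files/";
    redef FileExtract::default_limit = 10485760;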
@@ -42,17 +42,17 @@ export {
        sensor_id: count &log;
        ## Sig id for this generator.
        signature_id: count &log;
-       ## A string representation of the "signature_id" field if a sid_msg.map file was loaded.
+       ## A string representation of the *signature_id* field if a sid_msg.map file was loaded.
        signature: string &log &optional;
        ## Which generator generated the alert?
        generator_id: count &log;
-       ## A string representation of the "generator_id" field if a gen_msg.map file was loaded.
+       ## A string representation of the *generator_id* field if a gen_msg.map file was loaded.
        generator: string &log &optional;
        ## Sig revision for this id.
        signature_revision: count &log;
        ## Event classification.
        classification_id: count &log;
-       ## Descriptive classification string,
+       ## Descriptive classification string.
        classification: string &log &optional;
        ## Event priority.
        priority_id: count &log;
scripts/base/frameworks/analyzer/README | 3 (new file)

@@ -0,0 +1,3 @@
The analyzer framework allows to dynamically enable or disable Bro's
protocol analyzers, as well as to manage the well-known ports which
automatically activate a particular analyzer for new connections.
scripts/base/frameworks/analyzer/main.bro

@@ -5,8 +5,8 @@
 ##! particular analyzer for new connections.
 ##!
 ##! Protocol analyzers are identified by unique tags of type
-##! :bro:type:`Analyzer::Tag`, such as :bro:enum:`Analyzer::ANALYZER_HTTP` and
-##! :bro:enum:`Analyzer::ANALYZER_HTTP`. These tags are defined internally by
+##! :bro:type:`Analyzer::Tag`, such as :bro:enum:`Analyzer::ANALYZER_HTTP`.
+##! These tags are defined internally by
 ##! the analyzers themselves, and documented in their analyzer-specific
 ##! description along with the events that they generate.
@@ -15,8 +15,8 @@
 module Analyzer;
 
 export {
-   ## If true, all available analyzers are initially disabled at startup. One
-   ## can then selectively enable them with
+   ## If true, all available analyzers are initially disabled at startup.
+   ## One can then selectively enable them with
    ## :bro:id:`Analyzer::enable_analyzer`.
    global disable_all = F &redef;
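A hedged sketch of the whitelisting pattern this option enables (the analyzer
choices are arbitrary examples):

    redef Analyzer::disable_all = T;

    event bro_init()
        {
        # With everything disabled at startup, selectively re-enable
        # only the analyzers this policy cares about.
        Analyzer::enable_analyzer(Analyzer::ANALYZER_HTTP);
        Analyzer::enable_analyzer(Analyzer::ANALYZER_DNS);
        }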
@@ -45,7 +45,7 @@ export {
    ##
    ## ports: The set of well-known ports to associate with the analyzer.
    ##
-   ## Returns: True if the ports were sucessfully registered.
+   ## Returns: True if the ports were successfully registered.
    global register_for_ports: function(tag: Analyzer::Tag, ports: set[port]) : bool;
 
    ## Registers an individual well-known port for an analyzer. If a future
@@ -57,7 +57,7 @@ export {
    ##
    ## p: The well-known port to associate with the analyzer.
    ##
-   ## Returns: True if the port was sucessfully registered.
+   ## Returns: True if the port was successfully registered.
    global register_for_port: function(tag: Analyzer::Tag, p: port) : bool;
 
    ## Returns a set of all well-known ports currently registered for a
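A hedged usage sketch for the registration functions documented above (the
port and analyzer are arbitrary examples):

    event bro_init()
        {
        # Also treat port 8080 as well-known for HTTP, so new
        # connections on it activate the HTTP analyzer.
        Analyzer::register_for_port(Analyzer::ANALYZER_HTTP, 8080/tcp);
        }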
@@ -88,8 +88,8 @@ export {
    ## Returns: The analyzer tag corresponding to the name.
    global get_tag: function(name: string): Analyzer::Tag;
 
-   ## Schedules an analyzer for a future connection originating from a given IP
-   ## address and port.
+   ## Schedules an analyzer for a future connection originating from a
+   ## given IP address and port.
    ##
    ## orig: The IP address originating a connection in the future.
    ##       0.0.0.0 can be used as a wildcard to match any originator address.
@@ -103,7 +103,7 @@ export {
    ## tout: A timeout interval after which the scheduling request will be
    ##       discarded if the connection has not yet been seen.
    ##
-   ## Returns: True if succesful.
+   ## Returns: True if successful.
    global schedule_analyzer: function(orig: addr, resp: addr, resp_p: port,
                                       analyzer: Analyzer::Tag, tout: interval) : bool;
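A hedged sketch of scheduling an analyzer for an expected future connection,
e.g. a data channel negotiated out of band (addresses and timeout are
arbitrary examples):

    event bro_init()
        {
        # Expect some originator to connect to 10.0.0.1:8000 within the
        # next five minutes, and analyze that connection as HTTP.
        Analyzer::schedule_analyzer(0.0.0.0, 10.0.0.1, 8000/tcp,
                                    Analyzer::ANALYZER_HTTP, 5min);
        }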
scripts/base/frameworks/cluster/README | 2 (new file)

@@ -0,0 +1,2 @@
The cluster framework provides for establishing and controlling a cluster
of Bro instances.
scripts/base/frameworks/cluster/main.bro

@@ -39,7 +39,8 @@ export {
    ## The node type doing all the actual traffic analysis.
    WORKER,
    ## A node acting as a traffic recorder using the
-   ## `Time Machine <http://tracker.bro.org/time-machine>`_ software.
+   ## `Time Machine <http://bro.org/community/time-machine.html>`_
+   ## software.
    TIME_MACHINE,
 };
@@ -58,7 +59,7 @@ export {
    ## Events raised by workers and handled by a manager.
    const worker2manager_events = /(TimeMachine::command|Drop::.*)/ &redef;
 
-   ## Events raised by workers and handled by proxies..
+   ## Events raised by workers and handled by proxies.
    const worker2proxy_events = /EMPTY/ &redef;
 
    ## Events raised by TimeMachine instances and handled by a manager.
@@ -80,7 +81,7 @@ export {
    ## If the *ip* field is a non-global IPv6 address, this field
    ## can specify a particular :rfc:`4007` ``zone_id``.
    zone_id: string &default="";
-   ## The port to which the this local node can connect when
+   ## The port to which this local node can connect when
    ## establishing communication.
    p: port;
    ## Identifier for the interface a worker is sniffing.
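A hedged sketch of a two-node layout populating ``Cluster::nodes`` with this
record (hosts, ports, and the manager/workers field names are assumptions
here; in practice BroControl generates this table from its node
configuration):

    redef Cluster::nodes = {
        ["manager"] = [$node_type=Cluster::MANAGER,
                       $ip=192.168.1.1, $p=47761/tcp,
                       $workers=set("worker-1")],
        ["worker-1"] = [$node_type=Cluster::WORKER,
                        $ip=192.168.1.2, $p=47763/tcp,
                        $interface="eth0", $manager="manager"],
    };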
@@ -19,6 +19,6 @@ redef Log::default_rotation_postprocessor_cmd = "delete-log";
 ## Record all packets into trace file.
 ##
 ## Note that this only indicates that *if* we are recording packets, we want all
-## of them (rather than just those the core deems sufficiently important). Setting
-## this does not turn recording on. Use '-w <trace>' for that.
+## of them (rather than just those the core deems sufficiently important).
+## Setting this does not turn recording on. Use '-w <trace>' for that.
 redef record_all_packets = T;
scripts/base/frameworks/communication/README | 2 (new file)

@@ -0,0 +1,2 @@
The communication framework facilitates connecting to remote Bro or
Broccoli instances to share state and transfer events.
scripts/base/frameworks/communication/main.bro

@@ -42,10 +42,11 @@ export {
    type Info: record {
        ## The network time at which a communication event occurred.
        ts: time &log;
-       ## The peer name (if any) with which a communication event is concerned.
+       ## The peer name (if any) with which a communication event is
+       ## concerned.
        peer: string &log &optional;
-       ## Where the communication event message originated from, that is,
-       ## either from the scripting layer or inside the Bro process.
+       ## Where the communication event message originated from, that
+       ## is, either from the scripting layer or inside the Bro process.
        src_name: string &log &optional;
        ## .. todo:: currently unused.
        connected_peer_desc: string &log &optional;
@@ -71,8 +72,8 @@ export {
        ## can specify a particular :rfc:`4007` ``zone_id``.
        zone_id: string &optional;
 
-       ## Port of the remote Bro communication endpoint if we are initiating
-       ## the connection based on the :bro:id:`connect` field.
+       ## Port of the remote Bro communication endpoint if we are
+       ## initiating the connection (based on the *connect* field).
        p: port &optional;
 
        ## When accepting a connection, the configuration only
@@ -87,7 +88,7 @@ export {
        events: pattern &optional;
 
        ## Whether we are going to connect (rather than waiting
-       ## for the other sie to connect to us).
+       ## for the other side to connect to us).
        connect: bool &default = F;
 
        ## If disconnected, reconnect after this many seconds.
@@ -103,13 +104,14 @@ export {
        request_logs: bool &default = F;
 
        ## When performing state synchronization, whether we consider
-       ## our state to be authoritative. If so, we will send the peer
-       ## our current set when the connection is set up.
-       ## (Only one side can be authoritative)
+       ## our state to be authoritative (only one side can be
+       ## authoritative). If so, we will send the peer our current
+       ## set when the connection is set up.
        auth: bool &default = F;
 
        ## If not set, no capture filter is sent.
-       ## If set to "", the default capture filter is sent.
+       ## If set to an empty string, then the default capture filter
+       ## is sent.
        capture_filter: string &optional;
 
        ## Whether to use SSL-based communication.
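A hedged sketch of filling in this record via ``Communication::nodes`` (the
peer name, address, and event pattern are arbitrary examples):

    redef Communication::nodes += {
        # Actively connect out to a peer, retry once a minute if the
        # link drops, and subscribe to its "my_event" events.
        ["peer-1"] = [$host=192.168.0.2, $p=47757/tcp,
                      $connect=T, $retry=1min,
                      $events=/my_event/],
    };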
scripts/base/frameworks/control/README | 3 (new file)

@@ -0,0 +1,3 @@
The control framework provides the foundation for providing "commands"
that can be taken remotely at runtime to modify a running Bro instance
or collect information from the running instance.
scripts/base/frameworks/control/main.bro

@@ -57,7 +57,8 @@ export {
    ## Returns the current net_stats.
    global net_stats_response: event(s: string);
 
-   ## Inform the remote Bro instance that it's configuration may have been updated.
+   ## Inform the remote Bro instance that it's configuration may have been
+   ## updated.
    global configuration_update_request: event();
    ## This event is a wrapper and alias for the
    ## :bro:id:`Control::configuration_update_request` event.
scripts/base/frameworks/dpd/README | 2 (new file)

@@ -0,0 +1,2 @@
The DPD (dynamic protocol detection) activates port-independent protocol
detection and selectively disables analyzers if protocol violations occur.
scripts/base/frameworks/files/README | 3 (new file)

@@ -0,0 +1,3 @@
The file analysis framework provides an interface for driving the analysis
of files, possibly independent of any network protocol over which they're
transported.
scripts/base/frameworks/files/main.bro

@@ -14,10 +14,11 @@ export {
        LOG
    };
 
-   ## A structure which represents a desired type of file analysis.
+   ## A structure which parameterizes a type of file analysis.
    type AnalyzerArgs: record {
        ## An event which will be generated for all new file contents,
-       ## chunk-wise. Used when *tag* is
+       ## chunk-wise. Used when *tag* (in the
+       ## :bro:see:`Files::add_analyzer` function) is
        ## :bro:see:`Files::ANALYZER_DATA_EVENT`.
        chunk_event: event(f: fa_file, data: string, off: count) &optional;
@@ -47,12 +48,12 @@ export {
        ## the data traveled to.
        rx_hosts: set[addr] &log;
 
-       ## Connection UIDS over which the file was transferred.
+       ## Connection UIDs over which the file was transferred.
        conn_uids: set[string] &log;
 
-       ## An identification of the source of the file data. E.g. it may be
-       ## a network protocol over which it was transferred, or a local file
-       ## path which was read, or some other input source.
+       ## An identification of the source of the file data. E.g. it
+       ## may be a network protocol over which it was transferred, or a
+       ## local file path which was read, or some other input source.
        source: string &log &optional;
 
        ## A value to represent the depth of this file in relation
@@ -64,9 +65,10 @@ export {
        ## A set of analysis types done during the file analysis.
        analyzers: set[string] &log;
 
-       ## A mime type provided by libmagic against the *bof_buffer*, or
-       ## in the cases where no buffering of the beginning of file occurs,
-       ## an initial guess of the mime type based on the first data seen.
+       ## A mime type provided by libmagic against the *bof_buffer*
+       ## field of :bro:see:`fa_file`, or in the cases where no
+       ## buffering of the beginning of file occurs, an initial
+       ## guess of the mime type based on the first data seen.
        mime_type: string &log &optional;
 
        ## A filename for the file if one is available from the source
@@ -79,12 +81,12 @@ export {
 
        ## If the source of this file is a network connection, this field
        ## indicates if the data originated from the local network or not as
-       ## determined by the configured bro:see:`Site::local_nets`.
+       ## determined by the configured :bro:see:`Site::local_nets`.
        local_orig: bool &log &optional;
 
        ## If the source of this file is a network connection, this field
-       ## indicates if the file is being sent by the originator of the connection
-       ## or the responder.
+       ## indicates if the file is being sent by the originator of the
+       ## connection or the responder.
        is_orig: bool &log &optional;
 
        ## Number of bytes provided to the file analysis engine for the file.
@@ -116,15 +118,15 @@ export {
    ## The salt concatenated to unique file handle strings generated by
    ## :bro:see:`get_file_handle` before hashing them in to a file id
    ## (the *id* field of :bro:see:`fa_file`).
-   ## Provided to help mitigate the possiblility of manipulating parts of
+   ## Provided to help mitigate the possibility of manipulating parts of
    ## network connections that factor in to the file handle in order to
    ## generate two handles that would hash to the same file id.
    const salt = "I recommend changing this." &redef;
 
    ## Sets the *timeout_interval* field of :bro:see:`fa_file`, which is
    ## used to determine the length of inactivity that is allowed for a file
-   ## before internal state related to it is cleaned up. When used within a
-   ## :bro:see:`file_timeout` handler, the analysis will delay timing out
+   ## before internal state related to it is cleaned up. When used within
+   ## a :bro:see:`file_timeout` handler, the analysis will delay timing out
    ## again for the period specified by *t*.
    ##
    ## f: the file.
@ -132,7 +134,7 @@ export {
|
||||||
## t: the amount of time the file can remain inactive before discarding.
|
## t: the amount of time the file can remain inactive before discarding.
|
||||||
##
|
##
|
||||||
## Returns: true if the timeout interval was set, or false if analysis
|
## Returns: true if the timeout interval was set, or false if analysis
|
||||||
## for the *id* isn't currently active.
|
## for the file isn't currently active.
|
||||||
global set_timeout_interval: function(f: fa_file, t: interval): bool;
|
global set_timeout_interval: function(f: fa_file, t: interval): bool;
|
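
A minimal usage sketch (illustrative only, not part of this change): extending a
file's lifetime from a :bro:see:`file_timeout` handler, using the file's own
*timeout_interval* field as documented above.

.. code:: bro

    event file_timeout(f: fa_file)
        {
        # Grant the file one more timeout interval before its state expires.
        Files::set_timeout_interval(f, f$timeout_interval);
        }
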
||||||
|
|
||||||
## Adds an analyzer to the analysis of a given file.
|
## Adds an analyzer to the analysis of a given file.
|
||||||
|
@ -144,7 +146,7 @@ export {
|
||||||
## args: any parameters the analyzer takes.
|
## args: any parameters the analyzer takes.
|
||||||
##
|
##
|
||||||
## Returns: true if the analyzer will be added, or false if analysis
|
## Returns: true if the analyzer will be added, or false if analysis
|
||||||
## for the *id* isn't currently active or the *args*
|
## for the file isn't currently active or the *args*
|
||||||
## were invalid for the analyzer type.
|
## were invalid for the analyzer type.
|
||||||
global add_analyzer: function(f: fa_file,
|
global add_analyzer: function(f: fa_file,
|
||||||
tag: Files::Tag,
|
tag: Files::Tag,
|
||||||
|
@ -154,10 +156,12 @@ export {
|
||||||
##
|
##
|
||||||
## f: the file.
|
## f: the file.
|
||||||
##
|
##
|
||||||
|
## tag: the analyzer type.
|
||||||
|
##
|
||||||
## args: the analyzer (type and args) to remove.
|
## args: the analyzer (type and args) to remove.
|
||||||
##
|
##
|
||||||
## Returns: true if the analyzer will be removed, or false if analysis
|
## Returns: true if the analyzer will be removed, or false if analysis
|
||||||
## for the *id* isn't currently active.
|
## for the file isn't currently active.
|
||||||
global remove_analyzer: function(f: fa_file,
|
global remove_analyzer: function(f: fa_file,
|
||||||
tag: Files::Tag,
|
tag: Files::Tag,
|
||||||
args: AnalyzerArgs &default=AnalyzerArgs()): bool;
|
args: AnalyzerArgs &default=AnalyzerArgs()): bool;
|
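
A hedged usage sketch: attaching the stock MD5 analyzer to every file. Doing
this unconditionally is just for illustration; real scripts usually filter on
MIME type or source first.

.. code:: bro

    event file_new(f: fa_file)
        {
        # Compute an MD5 digest for every file Bro sees.
        Files::add_analyzer(f, Files::ANALYZER_MD5);
        }
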
||||||
|
@ -167,11 +171,12 @@ export {
|
||||||
## f: the file.
|
## f: the file.
|
||||||
##
|
##
|
||||||
## Returns: true if analysis for the given file will be ignored for the
|
## Returns: true if analysis for the given file will be ignored for the
|
||||||
## rest of it's contents, or false if analysis for the *id*
|
## rest of its contents, or false if analysis for the file
|
||||||
## isn't currently active.
|
## isn't currently active.
|
||||||
global stop: function(f: fa_file): bool;
|
global stop: function(f: fa_file): bool;
|
||||||
|
|
||||||
## Translates an file analyzer enum value to a string with the analyzer's name.
|
## Translates a file analyzer enum value to a string with the
|
||||||
|
## analyzer's name.
|
||||||
##
|
##
|
||||||
## tag: The analyzer tag.
|
## tag: The analyzer tag.
|
||||||
##
|
##
|
||||||
|
@ -183,7 +188,7 @@ export {
|
||||||
##
|
##
|
||||||
## f: The file to be described.
|
## f: The file to be described.
|
||||||
##
|
##
|
||||||
## Returns a text description regarding metadata of the file.
|
## Returns: a text description regarding metadata of the file.
|
||||||
global describe: function(f: fa_file): string;
|
global describe: function(f: fa_file): string;
|
||||||
|
|
||||||
type ProtoRegistration: record {
|
type ProtoRegistration: record {
|
||||||
|
@ -209,10 +214,10 @@ export {
|
||||||
## Returns: true if the protocol being registered was not previously registered.
|
## Returns: true if the protocol being registered was not previously registered.
|
||||||
global register_protocol: function(tag: Analyzer::Tag, reg: ProtoRegistration): bool;
|
global register_protocol: function(tag: Analyzer::Tag, reg: ProtoRegistration): bool;
|
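
A sketch of how a protocol analyzer script might use this; the handle function
name is hypothetical, and the handle scheme (connection identifiers plus
direction) is only one reasonable choice.

.. code:: bro

    function my_get_file_handle(c: connection, is_orig: bool): string
        {
        # Derive a handle that is unique per connection and direction.
        return cat(c$start_time, c$id, is_orig);
        }

    event bro_init() &priority=20
        {
        Files::register_protocol(Analyzer::ANALYZER_FTP_DATA,
                                 [$get_file_handle = my_get_file_handle]);
        }
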
||||||
|
|
||||||
## Register a callback for file analyzers to use if they need to do some manipulation
|
## Register a callback for file analyzers to use if they need to do some
|
||||||
## when they are being added to a file before the core code takes over. This is
|
## manipulation when they are being added to a file before the core code
|
||||||
## unlikely to be interesting for users and should only be called by file analyzer
|
## takes over. This is unlikely to be interesting for users and should
|
||||||
## authors but it *not required*.
|
## only be called by file analyzer authors but is *not required*.
|
||||||
##
|
##
|
||||||
## tag: Tag for the file analyzer.
|
## tag: Tag for the file analyzer.
|
||||||
##
|
##
|
||||||
|
|
2
scripts/base/frameworks/input/README
Normal file
|
@ -0,0 +1,2 @@
|
||||||
|
The input framework provides a way to read previously stored data either as
|
||||||
|
an event stream or into a Bro table.
|
|
@ -33,45 +33,45 @@ export {
|
||||||
## that contain types that are not supported (at the moment
|
## that contain types that are not supported (at the moment
|
||||||
## file and function). If true, the input framework will
|
## file and function). If true, the input framework will
|
||||||
## warn in these cases, but continue. If false, it will
|
## warn in these cases, but continue. If false, it will
|
||||||
## abort. Defaults to false (abort)
|
## abort. Defaults to false (abort).
|
||||||
const accept_unsupported_types = F &redef;
|
const accept_unsupported_types = F &redef;
|
||||||
|
|
||||||
## TableFilter description type used for the `table` method.
|
## TableFilter description type used for the `table` method.
|
||||||
type TableDescription: record {
|
type TableDescription: record {
|
||||||
## Common definitions for tables and events
|
# Common definitions for tables and events
|
||||||
|
|
||||||
## String that allows the reader to find the source.
|
## String that allows the reader to find the source.
|
||||||
## For `READER_ASCII`, this is the filename.
|
## For `READER_ASCII`, this is the filename.
|
||||||
source: string;
|
source: string;
|
||||||
|
|
||||||
## Reader to use for this stream
|
## Reader to use for this stream.
|
||||||
reader: Reader &default=default_reader;
|
reader: Reader &default=default_reader;
|
||||||
|
|
||||||
## Read mode to use for this stream
|
## Read mode to use for this stream.
|
||||||
mode: Mode &default=default_mode;
|
mode: Mode &default=default_mode;
|
||||||
|
|
||||||
## Descriptive name. Used to remove a stream at a later time
|
## Descriptive name. Used to remove a stream at a later time.
|
||||||
name: string;
|
name: string;
|
||||||
|
|
||||||
# Special definitions for tables
|
# Special definitions for tables
|
||||||
|
|
||||||
## Table which will receive the data read by the input framework
|
## Table which will receive the data read by the input framework.
|
||||||
destination: any;
|
destination: any;
|
||||||
|
|
||||||
## Record that defines the values used as the index of the table
|
## Record that defines the values used as the index of the table.
|
||||||
idx: any;
|
idx: any;
|
||||||
|
|
||||||
## Record that defines the values used as the elements of the table
|
## Record that defines the values used as the elements of the table.
|
||||||
## If val is undefined, destination has to be a set.
|
## If this is undefined, then *destination* has to be a set.
|
||||||
val: any &optional;
|
val: any &optional;
|
||||||
|
|
||||||
## Defines if the value of the table is a record (default), or a single value. Val
|
## Defines if the value of the table is a record (default), or a single value.
|
||||||
## can only contain one element when this is set to false.
|
## When this is set to false, then *val* can only contain one element.
|
||||||
want_record: bool &default=T;
|
want_record: bool &default=T;
|
||||||
|
|
||||||
## The event that is raised each time a value is added to, changed in or removed
|
## The event that is raised each time a value is added to, changed in or removed
|
||||||
## from the table. The event will receive an Input::Event enum as the first
|
## from the table. The event will receive an Input::Event enum as the first
|
||||||
## argument, the idx record as the second argument and the value (record) as the
|
## argument, the *idx* record as the second argument and the value (record) as the
|
||||||
## third argument.
|
## third argument.
|
||||||
ev: any &optional; # event containing idx, val as values.
|
ev: any &optional; # event containing idx, val as values.
|
||||||
|
|
||||||
|
@ -88,19 +88,19 @@ export {
|
||||||
|
|
||||||
## EventFilter description type used for the `event` method.
|
## EventFilter description type used for the `event` method.
|
||||||
type EventDescription: record {
|
type EventDescription: record {
|
||||||
## Common definitions for tables and events
|
# Common definitions for tables and events
|
||||||
|
|
||||||
## String that allows the reader to find the source.
|
## String that allows the reader to find the source.
|
||||||
## For `READER_ASCII`, this is the filename.
|
## For `READER_ASCII`, this is the filename.
|
||||||
source: string;
|
source: string;
|
||||||
|
|
||||||
## Reader to use for this steam
|
## Reader to use for this stream.
|
||||||
reader: Reader &default=default_reader;
|
reader: Reader &default=default_reader;
|
||||||
|
|
||||||
## Read mode to use for this stream
|
## Read mode to use for this stream.
|
||||||
mode: Mode &default=default_mode;
|
mode: Mode &default=default_mode;
|
||||||
|
|
||||||
## Descriptive name. Used to remove a stream at a later time
|
## Descriptive name. Used to remove a stream at a later time.
|
||||||
name: string;
|
name: string;
|
||||||
|
|
||||||
# Special definitions for events
|
# Special definitions for events
|
||||||
|
@ -108,8 +108,8 @@ export {
|
||||||
## Record describing the fields to be retrieved from the source input.
|
## Record describing the fields to be retrieved from the source input.
|
||||||
fields: any;
|
fields: any;
|
||||||
|
|
||||||
## If want_record if false, the event receives each value in fields as a separate argument.
|
## If this is false, the event receives each value in fields as a separate argument.
|
||||||
## If it is set to true (default), the event receives all fields in a single record value.
|
## If this is set to true (default), the event receives all fields in a single record value.
|
||||||
want_record: bool &default=T;
|
want_record: bool &default=T;
|
||||||
|
|
||||||
## The event that is raised each time a new line is received from the reader.
|
## The event that is raised each time a new line is received from the reader.
|
||||||
|
@ -122,23 +122,23 @@ export {
|
||||||
config: table[string] of string &default=table();
|
config: table[string] of string &default=table();
|
||||||
};
|
};
|
||||||
|
|
||||||
## A file analyis input stream type used to forward input data to the
|
## A file analysis input stream type used to forward input data to the
|
||||||
## file analysis framework.
|
## file analysis framework.
|
||||||
type AnalysisDescription: record {
|
type AnalysisDescription: record {
|
||||||
## String that allows the reader to find the source.
|
## String that allows the reader to find the source.
|
||||||
## For `READER_ASCII`, this is the filename.
|
## For `READER_ASCII`, this is the filename.
|
||||||
source: string;
|
source: string;
|
||||||
|
|
||||||
## Reader to use for this steam. Compatible readers must be
|
## Reader to use for this stream. Compatible readers must be
|
||||||
## able to accept a filter of a single string type (i.e.
|
## able to accept a filter of a single string type (i.e.
|
||||||
## they read a byte stream).
|
## they read a byte stream).
|
||||||
reader: Reader &default=Input::READER_BINARY;
|
reader: Reader &default=Input::READER_BINARY;
|
||||||
|
|
||||||
## Read mode to use for this stream
|
## Read mode to use for this stream.
|
||||||
mode: Mode &default=default_mode;
|
mode: Mode &default=default_mode;
|
||||||
|
|
||||||
## Descriptive name that uniquely identifies the input source.
|
## Descriptive name that uniquely identifies the input source.
|
||||||
## Can be used used to remove a stream at a later time.
|
## Can be used to remove a stream at a later time.
|
||||||
## This will also be used for the unique *source* field of
|
## This will also be used for the unique *source* field of
|
||||||
## :bro:see:`fa_file`. Most of the time, the best choice for this
|
## :bro:see:`fa_file`. Most of the time, the best choice for this
|
||||||
## field will be the same value as the *source* field.
|
## field will be the same value as the *source* field.
|
||||||
|
@ -150,38 +150,44 @@ export {
|
||||||
config: table[string] of string &default=table();
|
config: table[string] of string &default=table();
|
||||||
};
|
};
|
||||||
|
|
||||||
## Create a new table input from a given source. Returns true on success.
|
## Create a new table input from a given source.
|
||||||
##
|
##
|
||||||
## description: `TableDescription` record describing the source.
|
## description: `TableDescription` record describing the source.
|
||||||
|
##
|
||||||
|
## Returns: true on success.
|
||||||
global add_table: function(description: Input::TableDescription) : bool;
|
global add_table: function(description: Input::TableDescription) : bool;
|
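
A minimal table-input sketch, assuming a two-column input file named
``blacklist.file`` whose columns match the *idx* and *val* records:

.. code:: bro

    type Idx: record {
        ip: addr;
    };

    type Val: record {
        reason: string;
    };

    global blacklist: table[addr] of Val = table();

    event bro_init()
        {
        # Read blacklist.file into the table; its columns must match Idx/Val.
        Input::add_table([$source="blacklist.file", $name="blacklist",
                          $idx=Idx, $val=Val, $destination=blacklist]);
        }
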
||||||
|
|
||||||
## Create a new event input from a given source. Returns true on success.
|
## Create a new event input from a given source.
|
||||||
##
|
##
|
||||||
## description: `TableDescription` record describing the source.
|
## description: `EventDescription` record describing the source.
|
||||||
|
##
|
||||||
|
## Returns: true on success.
|
||||||
global add_event: function(description: Input::EventDescription) : bool;
|
global add_event: function(description: Input::EventDescription) : bool;
|
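
A minimal event-input sketch, assuming an input file named ``input.file`` whose
columns match the *fields* record; with the default ``want_record=T`` the
handler receives all fields in one record value.

.. code:: bro

    type EventVal: record {
        i: int;
        s: string;
    };

    event line(description: Input::EventDescription, tpe: Input::Event,
               r: EventVal)
        {
        print r;
        }

    event bro_init()
        {
        Input::add_event([$source="input.file", $name="input",
                          $fields=EventVal, $ev=line]);
        }
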
||||||
|
|
||||||
## Create a new file analysis input from a given source. Data read from
|
## Create a new file analysis input from a given source. Data read from
|
||||||
## the source is automatically forwarded to the file analysis framework.
|
## the source is automatically forwarded to the file analysis framework.
|
||||||
##
|
##
|
||||||
## description: A record describing the source
|
## description: A record describing the source.
|
||||||
##
|
##
|
||||||
## Returns: true on sucess.
|
## Returns: true on success.
|
||||||
global add_analysis: function(description: Input::AnalysisDescription) : bool;
|
global add_analysis: function(description: Input::AnalysisDescription) : bool;
|
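
A minimal sketch (the on-disk path is illustrative): feeding a file through the
file analysis framework via the default binary reader.

.. code:: bro

    event bro_init()
        {
        # Forward the file's content into file analysis as a byte stream.
        Input::add_analysis([$source="/tmp/sample.dat",
                             $name="/tmp/sample.dat"]);
        }
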
||||||
|
|
||||||
## Remove a input stream. Returns true on success and false if the named stream was
|
## Remove an input stream.
|
||||||
## not found.
|
|
||||||
##
|
##
|
||||||
## id: string value identifying the stream to be removed
|
## id: string value identifying the stream to be removed.
|
||||||
|
##
|
||||||
|
## Returns: true on success and false if the named stream was not found.
|
||||||
global remove: function(id: string) : bool;
|
global remove: function(id: string) : bool;
|
||||||
|
|
||||||
## Forces the current input to be checked for changes.
|
## Forces the current input to be checked for changes.
|
||||||
## Returns true on success and false if the named stream was not found
|
|
||||||
##
|
##
|
||||||
## id: string value identifying the stream
|
## id: string value identifying the stream.
|
||||||
|
##
|
||||||
|
## Returns: true on success and false if the named stream was not found.
|
||||||
global force_update: function(id: string) : bool;
|
global force_update: function(id: string) : bool;
|
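
A common teardown pattern, continuing the "blacklist" table sketch above:
remove the stream once its source has been read to completion.

.. code:: bro

    event Input::end_of_data(name: string, source: string)
        {
        # Once the source has been fully read, tear the stream down again.
        if ( name == "blacklist" )
            Input::remove("blacklist");
        }
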
||||||
|
|
||||||
## Event that is called, when the end of a data source has been reached, including
|
## Event that is called when the end of a data source has been reached,
|
||||||
## after an update.
|
## including after an update.
|
||||||
global end_of_data: event(name: string, source:string);
|
global end_of_data: event(name: string, source:string);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -6,11 +6,11 @@ module InputAscii;
|
||||||
|
|
||||||
export {
|
export {
|
||||||
## Separator between fields.
|
## Separator between fields.
|
||||||
## Please note that the separator has to be exactly one character long
|
## Please note that the separator has to be exactly one character long.
|
||||||
const separator = Input::separator &redef;
|
const separator = Input::separator &redef;
|
||||||
|
|
||||||
## Separator between set elements.
|
## Separator between set elements.
|
||||||
## Please note that the separator has to be exactly one character long
|
## Please note that the separator has to be exactly one character long.
|
||||||
const set_separator = Input::set_separator &redef;
|
const set_separator = Input::set_separator &redef;
|
||||||
|
|
||||||
## String to use for empty fields.
|
## String to use for empty fields.
|
||||||
|
|
|
@ -1,23 +1,23 @@
|
||||||
##! Interface for the ascii input reader.
|
##! Interface for the benchmark input reader.
|
||||||
|
|
||||||
module InputBenchmark;
|
module InputBenchmark;
|
||||||
|
|
||||||
export {
|
export {
|
||||||
## multiplication factor for each second
|
## Multiplication factor for each second.
|
||||||
const factor = 1.0 &redef;
|
const factor = 1.0 &redef;
|
||||||
|
|
||||||
## spread factor between lines
|
## Spread factor between lines.
|
||||||
const spread = 0 &redef;
|
const spread = 0 &redef;
|
||||||
|
|
||||||
## spreading where usleep = 1000000 / autospread * num_lines
|
## Spreading where usleep = 1000000 / autospread * num_lines.
|
||||||
const autospread = 0.0 &redef;
|
const autospread = 0.0 &redef;
|
||||||
|
|
||||||
## addition factor for each heartbeat
|
## Addition factor for each heartbeat.
|
||||||
const addfactor = 0 &redef;
|
const addfactor = 0 &redef;
|
||||||
|
|
||||||
## stop spreading at x lines per heartbeat
|
## Stop spreading at x lines per heartbeat.
|
||||||
const stopspreadat = 0 &redef;
|
const stopspreadat = 0 &redef;
|
||||||
|
|
||||||
## 1 -> enable timed spreading
|
## 1 -> enable timed spreading.
|
||||||
const timedspread = 0.0 &redef;
|
const timedspread = 0.0 &redef;
|
||||||
}
|
}
|
||||||
|
|
|
@ -4,14 +4,14 @@ module InputRaw;
|
||||||
|
|
||||||
export {
|
export {
|
||||||
## Separator between input records.
|
## Separator between input records.
|
||||||
## Please note that the separator has to be exactly one character long
|
## Please note that the separator has to be exactly one character long.
|
||||||
const record_separator = "\n" &redef;
|
const record_separator = "\n" &redef;
|
||||||
|
|
||||||
## Event that is called when a process created by the raw reader exits.
|
## Event that is called when a process created by the raw reader exits.
|
||||||
##
|
##
|
||||||
## name: name of the input stream
|
## name: name of the input stream.
|
||||||
## source: source of the input stream
|
## source: source of the input stream.
|
||||||
## exit_code: exit code of the program, or number of the signal that forced the program to exit
|
## exit_code: exit code of the program, or number of the signal that forced the program to exit.
|
||||||
## signal_exit: false when program exitted normally, true when program was forced to exit by a signal
|
## signal_exit: false when program exited normally, true when program was forced to exit by a signal.
|
||||||
global process_finished: event(name: string, source:string, exit_code:count, signal_exit:bool);
|
global process_finished: event(name: string, source:string, exit_code:count, signal_exit:bool);
|
||||||
}
|
}
|
||||||
|
|
3
scripts/base/frameworks/intel/README
Normal file
|
@ -0,0 +1,3 @@
|
||||||
|
The intelligence framework provides a way to store and query intelligence
|
||||||
|
data (such as IP addresses or strings). Metadata can also be associated
|
||||||
|
with the intelligence.
|
|
@ -1,5 +1,5 @@
|
||||||
##! Cluster transparency support for the intelligence framework. This is mostly oriented
|
##! Cluster transparency support for the intelligence framework. This is mostly
|
||||||
##! toward distributing intelligence information across clusters.
|
##! oriented toward distributing intelligence information across clusters.
|
||||||
|
|
||||||
@load base/frameworks/cluster
|
@load base/frameworks/cluster
|
||||||
@load ./input
|
@load ./input
|
||||||
|
|
|
@ -4,7 +4,7 @@ module Intel;
|
||||||
|
|
||||||
export {
|
export {
|
||||||
## Intelligence files that will be read off disk. The files are
|
## Intelligence files that will be read off disk. The files are
|
||||||
## reread everytime they are updated so updates much be atomic with
|
## reread every time they are updated so updates must be atomic with
|
||||||
## "mv" instead of writing the file in place.
|
## "mv" instead of writing the file in place.
|
||||||
const read_files: set[string] = {} &redef;
|
const read_files: set[string] = {} &redef;
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
##! The intelligence framework provides a way to store and query IP addresses,
|
##! The intelligence framework provides a way to store and query IP addresses,
|
||||||
##! and strings (with a str_type). Metadata can
|
##! and strings (with a str_type). Metadata can
|
||||||
##! also be associated with the intelligence like for making more informed
|
##! also be associated with the intelligence, like for making more informed
|
||||||
##! decisions about matching and handling of intelligence.
|
##! decisions about matching and handling of intelligence.
|
||||||
|
|
||||||
@load base/frameworks/notice
|
@load base/frameworks/notice
|
||||||
|
@ -14,7 +14,7 @@ export {
|
||||||
type Type: enum {
|
type Type: enum {
|
||||||
## An IP address.
|
## An IP address.
|
||||||
ADDR,
|
ADDR,
|
||||||
## A complete URL without the prefix "http://".
|
## A complete URL without the prefix ``"http://"``.
|
||||||
URL,
|
URL,
|
||||||
## Software name.
|
## Software name.
|
||||||
SOFTWARE,
|
SOFTWARE,
|
||||||
|
@ -24,21 +24,22 @@ export {
|
||||||
DOMAIN,
|
DOMAIN,
|
||||||
## A user name.
|
## A user name.
|
||||||
USER_NAME,
|
USER_NAME,
|
||||||
## File hash which is non-hash type specific. It's up to the user to query
|
## File hash which is non-hash type specific. It's up to the
|
||||||
## for any relevant hash types.
|
## user to query for any relevant hash types.
|
||||||
FILE_HASH,
|
FILE_HASH,
|
||||||
## File names. Typically with protocols with definite indications
|
## File name. Typically with protocols with definite
|
||||||
## of a file name.
|
## indications of a file name.
|
||||||
FILE_NAME,
|
FILE_NAME,
|
||||||
## Certificate SHA-1 hash.
|
## Certificate SHA-1 hash.
|
||||||
CERT_HASH,
|
CERT_HASH,
|
||||||
};
|
};
|
||||||
|
|
||||||
## Data about an :bro:type:`Intel::Item`
|
## Data about an :bro:type:`Intel::Item`.
|
||||||
type MetaData: record {
|
type MetaData: record {
|
||||||
## An arbitrary string value representing the data source. Typically,
|
## An arbitrary string value representing the data source.
|
||||||
## the convention for this field will be the source name and feed name
|
## Typically, the convention for this field will be the source
|
||||||
## separated by a hyphen. For example: "source1-c&c".
|
## name and feed name separated by a hyphen.
|
||||||
|
## For example: "source1-c&c".
|
||||||
source: string;
|
source: string;
|
||||||
## A freeform description for the data.
|
## A freeform description for the data.
|
||||||
desc: string &optional;
|
desc: string &optional;
|
||||||
|
@ -81,7 +82,7 @@ export {
|
||||||
where: Where &log;
|
where: Where &log;
|
||||||
|
|
||||||
## If the data was discovered within a connection, the
|
## If the data was discovered within a connection, the
|
||||||
## connection record should go into get to give context to the data.
|
## connection record should go here to give context to the data.
|
||||||
conn: connection &optional;
|
conn: connection &optional;
|
||||||
|
|
||||||
## If the data was discovered within a file, the file record
|
## If the data was discovered within a file, the file record
|
||||||
|
@ -106,10 +107,12 @@ export {
|
||||||
## this is the uid for the file.
|
## this is the uid for the file.
|
||||||
fuid: string &log &optional;
|
fuid: string &log &optional;
|
||||||
## A mime type if the intelligence hit is related to a file.
|
## A mime type if the intelligence hit is related to a file.
|
||||||
## If the $f field is provided this will be automatically filled out.
|
## If the $f field is provided this will be automatically filled
|
||||||
|
## out.
|
||||||
file_mime_type: string &log &optional;
|
file_mime_type: string &log &optional;
|
||||||
## Frequently files can be "described" to give a bit more context.
|
## Frequently files can be "described" to give a bit more context.
|
||||||
## If the $f field is provided this field will be automatically filled out.
|
## If the $f field is provided this field will be automatically
|
||||||
|
## filled out.
|
||||||
file_desc: string &log &optional;
|
file_desc: string &log &optional;
|
||||||
|
|
||||||
## Where the data was seen.
|
## Where the data was seen.
|
||||||
|
@ -125,13 +128,13 @@ export {
|
||||||
## it against known intelligence for matches.
|
## it against known intelligence for matches.
|
||||||
global seen: function(s: Seen);
|
global seen: function(s: Seen);
|
||||||
|
|
||||||
## Event to represent a match in the intelligence data from data that was seen.
|
## Event to represent a match in the intelligence data from data that
|
||||||
## On clusters there is no assurance as to where this event will be generated
|
## was seen. On clusters there is no assurance as to where this event
|
||||||
## so do not assume that arbitrary global state beyond the given data
|
## will be generated so do not assume that arbitrary global state beyond
|
||||||
## will be available.
|
## the given data will be available.
|
||||||
##
|
##
|
||||||
## This is the primary mechanism where a user will take actions based on data
|
## This is the primary mechanism where a user will take actions based on
|
||||||
## within the intelligence framework.
|
## data within the intelligence framework.
|
||||||
global match: event(s: Seen, items: set[Item]);
|
global match: event(s: Seen, items: set[Item]);
|
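
A minimal sketch of acting on matches; the handler body (printing the hit) is
illustrative only.

.. code:: bro

    event Intel::match(s: Intel::Seen, items: set[Intel::Item])
        {
        for ( item in items )
            print fmt("intel hit at %s from source %s",
                      s$where, item$meta$source);
        }
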
||||||
|
|
||||||
global log_intel: event(rec: Info);
|
global log_intel: event(rec: Info);
|
||||||
|
@ -140,7 +143,7 @@ export {
|
||||||
# Internal handler for matches with no metadata available.
|
# Internal handler for matches with no metadata available.
|
||||||
global match_no_items: event(s: Seen);
|
global match_no_items: event(s: Seen);
|
||||||
|
|
||||||
# Internal events for cluster data distribution
|
# Internal events for cluster data distribution.
|
||||||
global new_item: event(item: Item);
|
global new_item: event(item: Item);
|
||||||
global updated_item: event(item: Item);
|
global updated_item: event(item: Item);
|
||||||
|
|
||||||
|
|
1
scripts/base/frameworks/logging/README
Normal file
|
@ -0,0 +1 @@
|
||||||
|
The logging framework provides a flexible key-value based logging interface.
|
|
@ -1,6 +1,6 @@
|
||||||
##! The Bro logging interface.
|
##! The Bro logging interface.
|
||||||
##!
|
##!
|
||||||
##! See :doc:`/frameworks/logging` for a introduction to Bro's
|
##! See :doc:`/frameworks/logging` for an introduction to Bro's
|
||||||
##! logging framework.
|
##! logging framework.
|
||||||
|
|
||||||
module Log;
|
module Log;
|
||||||
|
@ -27,7 +27,7 @@ export {
|
||||||
const set_separator = "," &redef;
|
const set_separator = "," &redef;
|
||||||
|
|
||||||
## String to use for empty fields. This should be different from
|
## String to use for empty fields. This should be different from
|
||||||
## *unset_field* to make the output non-ambigious.
|
## *unset_field* to make the output unambiguous.
|
||||||
## Can be overwritten by individual writers.
|
## Can be overwritten by individual writers.
|
||||||
const empty_field = "(empty)" &redef;
|
const empty_field = "(empty)" &redef;
|
||||||
|
|
||||||
|
@ -41,8 +41,8 @@ export {
|
||||||
columns: any;
|
columns: any;
|
||||||
|
|
||||||
## Event that will be raised once for each log entry.
|
## Event that will be raised once for each log entry.
|
||||||
## The event receives a single same parameter, an instance of type
|
## The event receives a single parameter, an instance of
|
||||||
## ``columns``.
|
## type ``columns``.
|
||||||
ev: any &optional;
|
ev: any &optional;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -114,7 +114,7 @@ export {
|
||||||
##
|
##
|
||||||
## The specific interpretation of the string is up to
|
## The specific interpretation of the string is up to
|
||||||
## the used writer, and may for example be the destination
|
## the used writer, and may for example be the destination
|
||||||
## file name. Generally, filenames are expected to given
|
## file name. Generally, filenames are expected to be given
|
||||||
## without any extensions; writers will add appropiate
|
## without any extensions; writers will add appropriate
|
||||||
## extensions automatically.
|
## extensions automatically.
|
||||||
##
|
##
|
||||||
|
@ -126,34 +126,36 @@ export {
|
||||||
path: string &optional;
|
path: string &optional;
|
||||||
|
|
||||||
## A function returning the output path for recording entries
|
## A function returning the output path for recording entries
|
||||||
## matching this filter. This is similar to ``path`` yet allows
|
## matching this filter. This is similar to *path* yet allows
|
||||||
## to compute the string dynamically. It is ok to return
|
## the string to be computed dynamically. It is ok to return
|
||||||
## different strings for separate calls, but be careful: it's
|
## different strings for separate calls, but be careful: it's
|
||||||
## easy to flood the disk by returning a new string for each
|
## easy to flood the disk by returning a new string for each
|
||||||
## connection ...
|
## connection.
|
||||||
##
|
##
|
||||||
## id: The ID associated with the log stream.
|
## id: The ID associated with the log stream.
|
||||||
##
|
##
|
||||||
## path: A suggested path value, which may be either the filter's
|
## path: A suggested path value, which may be either the filter's
|
||||||
## ``path`` if defined, else a previous result from the function.
|
## ``path`` if defined, else a previous result from the
|
||||||
## If no ``path`` is defined for the filter, then the first call
|
## function. If no ``path`` is defined for the filter,
|
||||||
## to the function will contain an empty string.
|
## then the first call to the function will contain an
|
||||||
|
## empty string.
|
||||||
##
|
##
|
||||||
## rec: An instance of the streams's ``columns`` type with its
|
## rec: An instance of the stream's ``columns`` type with its
|
||||||
## fields set to the values to be logged.
|
## fields set to the values to be logged.
|
||||||
##
|
##
|
||||||
## Returns: The path to be used for the filter, which will be subject
|
## Returns: The path to be used for the filter, which will be
|
||||||
## to the same automatic correction rules as the *path*
|
## subject to the same automatic correction rules as
|
||||||
## field of :bro:type:`Log::Filter` in the case of conflicts
|
## the *path* field of :bro:type:`Log::Filter` in the
|
||||||
## with other filters trying to use the same writer/path pair.
|
## case of conflicts with other filters trying to use
|
||||||
|
## the same writer/path pair.
|
||||||
path_func: function(id: ID, path: string, rec: any): string &optional;
|
path_func: function(id: ID, path: string, rec: any): string &optional;
|
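
A sketch of a *path_func*, assuming the Conn log stream; the function and
filter names are hypothetical. It splits conn entries by originator locality
and falls back to the suggested path when locality is unset.

.. code:: bro

    function split_path(id: Log::ID, path: string, rec: Conn::Info): string
        {
        # Split conn entries into local/remote logs; fall back to the
        # suggested path when locality is unknown.
        if ( rec?$local_orig )
            return rec$local_orig ? "conn-local" : "conn-remote";
        return path;
        }

    event bro_init()
        {
        Log::add_filter(Conn::LOG, [$name="conn-split",
                                    $path_func=split_path]);
        }
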
||||||
|
|
||||||
## Subset of column names to record. If not given, all
|
## Subset of column names to record. If not given, all
|
||||||
## columns are recorded.
|
## columns are recorded.
|
||||||
include: set[string] &optional;
|
include: set[string] &optional;
|
||||||
|
|
||||||
## Subset of column names to exclude from recording. If not given,
|
## Subset of column names to exclude from recording. If not
|
||||||
## all columns are recorded.
|
## given, all columns are recorded.
|
||||||
exclude: set[string] &optional;
|
exclude: set[string] &optional;
|
||||||
|
|
||||||
## If true, entries are recorded locally.
|
## If true, entries are recorded locally.
|
||||||
|
@ -229,7 +231,7 @@ export {
|
||||||
##
|
##
|
||||||
## filter: A record describing the desired logging parameters.
|
## filter: A record describing the desired logging parameters.
|
||||||
##
|
##
|
||||||
## Returns: True if the filter was sucessfully added, false if
|
## Returns: True if the filter was successfully added, false if
|
||||||
## the filter was not added or the *filter* argument was not
|
## the filter was not added or the *filter* argument was not
|
||||||
## the correct type.
|
## the correct type.
|
||||||
##
|
##
|
||||||
|
@ -277,7 +279,7 @@ export {
|
||||||
##
|
##
|
||||||
## Returns: True if the stream was found and no error occurred in writing
|
## Returns: True if the stream was found and no error occurred in writing
|
||||||
## to it or if the stream was disabled and nothing was written.
|
## to it or if the stream was disabled and nothing was written.
|
||||||
## False if the stream was was not found, or the *columns*
|
## False if the stream was not found, or the *columns*
|
||||||
## argument did not match what the stream was initially defined
|
## argument did not match what the stream was initially defined
|
||||||
## to handle, or one of the stream's filters has an invalid
|
## to handle, or one of the stream's filters has an invalid
|
||||||
## ``path_func``.
|
## ``path_func``.
|
||||||
|
@ -286,8 +288,8 @@ export {
|
||||||
global write: function(id: ID, columns: any) : bool;
|
global write: function(id: ID, columns: any) : bool;
|
||||||
|
|
||||||
## Sets the buffering status for all the writers of a given logging stream.
|
## Sets the buffering status for all the writers of a given logging stream.
|
||||||
## A given writer implementation may or may not support buffering and if it
|
## A given writer implementation may or may not support buffering and if
|
||||||
## doesn't then toggling buffering with this function has no effect.
|
## it doesn't then toggling buffering with this function has no effect.
|
||||||
##
|
##
|
||||||
## id: The ID associated with a logging stream for which to
|
## id: The ID associated with a logging stream for which to
|
||||||
## enable/disable buffering.
|
## enable/disable buffering.
|
||||||
|
@ -347,7 +349,7 @@ export {
|
||||||
##
|
##
|
||||||
## npath: The new path of the file (after already being rotated/processed
|
## npath: The new path of the file (after already being rotated/processed
|
||||||
## by writer-specific postprocessor as defined in
|
## by writer-specific postprocessor as defined in
|
||||||
## :bro:id:`Log::default_rotation_postprocessors`.
|
## :bro:id:`Log::default_rotation_postprocessors`).
|
||||||
##
|
##
|
||||||
## Returns: True when :bro:id:`Log::default_rotation_postprocessor_cmd`
|
## Returns: True when :bro:id:`Log::default_rotation_postprocessor_cmd`
|
||||||
## is empty or the system command given by it has been invoked
|
## is empty or the system command given by it has been invoked
|
||||||
|
|
|
@ -16,9 +16,9 @@
|
||||||
module Log;
|
module Log;
|
||||||
|
|
||||||
export {
|
export {
|
||||||
## Secure-copies the rotated-log to all the remote hosts
|
## Secure-copies the rotated log to all the remote hosts
|
||||||
## defined in :bro:id:`Log::scp_destinations` and then deletes
|
## defined in :bro:id:`Log::scp_destinations` and then deletes
|
||||||
## the local copy of the rotated-log. It's not active when
|
## the local copy of the rotated log. It's not active when
|
||||||
## reading from trace files.
|
## reading from trace files.
|
||||||
##
|
##
|
||||||
## info: A record holding meta-information about the log file to be
|
## info: A record holding meta-information about the log file to be
|
||||||
|
@ -42,9 +42,9 @@ export {
|
||||||
};
|
};
|
||||||
|
|
||||||
## A table indexed by a particular log writer and filter path, that yields
|
## A table indexed by a particular log writer and filter path, that yields
|
||||||
## a set remote destinations. The :bro:id:`Log::scp_postprocessor`
|
## a set of remote destinations. The :bro:id:`Log::scp_postprocessor`
|
||||||
## function queries this table upon log rotation and performs a secure
|
## function queries this table upon log rotation and performs a secure
|
||||||
## copy of the rotated-log to each destination in the set. This
|
## copy of the rotated log to each destination in the set. This
|
||||||
## table can be modified at run-time.
|
## table can be modified at run-time.
|
||||||
global scp_destinations: table[Writer, string] of set[SCPDestination];
|
global scp_destinations: table[Writer, string] of set[SCPDestination];
|
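
A hedged sketch of populating this table (user, host, and path values are
assumptions for illustration), shipping rotated ASCII conn logs to a collector:

.. code:: bro

    event bro_init()
        {
        local dest: Log::SCPDestination = [$user="bro",
                                           $host="archive.example.com",
                                           $path="/var/log/bro"];
        Log::scp_destinations[Log::WRITER_ASCII, "conn"] = set(dest);
        }
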
||||||
|
|
||||||
|
|
|
@ -16,9 +16,9 @@
|
||||||
module Log;
|
module Log;
|
||||||
|
|
||||||
export {
|
export {
|
||||||
## Securely transfers the rotated-log to all the remote hosts
|
## Securely transfers the rotated log to all the remote hosts
|
||||||
## defined in :bro:id:`Log::sftp_destinations` and then deletes
|
## defined in :bro:id:`Log::sftp_destinations` and then deletes
|
||||||
## the local copy of the rotated-log. It's not active when
|
## the local copy of the rotated log. It's not active when
|
||||||
## reading from trace files.
|
## reading from trace files.
|
||||||
##
|
##
|
||||||
## info: A record holding meta-information about the log file to be
|
## info: A record holding meta-information about the log file to be
|
||||||
|
@ -42,9 +42,9 @@ export {
|
||||||
};
|
};
|
||||||
|
|
||||||
## A table indexed by a particular log writer and filter path, that yields
|
## A table indexed by a particular log writer and filter path, that yields
|
||||||
## a set remote destinations. The :bro:id:`Log::sftp_postprocessor`
|
## a set of remote destinations. The :bro:id:`Log::sftp_postprocessor`
|
||||||
## function queries this table upon log rotation and performs a secure
|
## function queries this table upon log rotation and performs a secure
|
||||||
## transfer of the rotated-log to each destination in the set. This
|
## transfer of the rotated log to each destination in the set. This
|
||||||
## table can be modified at run-time.
|
## table can be modified at run-time.
|
||||||
global sftp_destinations: table[Writer, string] of set[SFTPDestination];
|
global sftp_destinations: table[Writer, string] of set[SFTPDestination];
|
||||||
|
|
||||||
|
|
|
@ -2,10 +2,10 @@
|
||||||
##! to tweak the output format of ASCII logs.
|
##! to tweak the output format of ASCII logs.
|
||||||
##!
|
##!
|
||||||
##! The ASCII writer supports currently one writer-specific filter option via
|
##! The ASCII writer supports currently one writer-specific filter option via
|
||||||
##! ``config``: setting ``tsv`` to the string ``T`` turns the output into into
|
##! ``config``: setting ``tsv`` to the string ``T`` turns the output into
|
||||||
##! "tab-separated-value" mode where only a single header row with the column names
|
##! "tab-separated-value" mode where only a single header row with the column
|
||||||
##! is printed out as meta information, with no "# fields" prepended; no other meta
|
##! names is printed out as meta information, with no "# fields" prepended; no
|
||||||
##! data gets included in that mode.
|
##! other meta data gets included in that mode.
|
||||||
##!
|
##!
|
||||||
##! Example filter using this::
|
##! Example filter using this::
|
||||||
##!
|
##!
|
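
The hunk ends before the example itself; a filter along these lines (a sketch,
assuming the Conn log stream) matches the description:

.. code:: bro

    event bro_init()
        {
        local f: Log::Filter = [$name = "my-filter",
                                $writer = Log::WRITER_ASCII,
                                $config = table(["tsv"] = "T")];
        Log::add_filter(Conn::LOG, f);
        }
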
||||||
|
@ -19,9 +19,9 @@ export {
|
||||||
## into files. This is primarily for debugging purposes.
|
## into files. This is primarily for debugging purposes.
|
||||||
const output_to_stdout = F &redef;
|
const output_to_stdout = F &redef;
|
||||||
|
|
||||||
## If true, include lines with log meta information such as column names with
|
## If true, include lines with log meta information such as column names
|
||||||
## types, the values of ASCII logging options that in use, and the time when the
|
## with types, the values of ASCII logging options that are in use, and
|
||||||
## file was opened and closes (the latter at the end).
|
## the time when the file was opened and closed (the latter at the end).
|
||||||
const include_meta = T &redef;
|
const include_meta = T &redef;
|
||||||
|
|
||||||
## Prefix for lines with meta information.
|
## Prefix for lines with meta information.
|
||||||
|
@ -34,7 +34,7 @@ export {
|
||||||
const set_separator = Log::set_separator &redef;
|
const set_separator = Log::set_separator &redef;
|
||||||
|
|
||||||
## String to use for empty fields. This should be different from
|
## String to use for empty fields. This should be different from
|
||||||
## *unset_field* to make the output non-ambigious.
|
## *unset_field* to make the output unambiguous.
|
||||||
const empty_field = Log::empty_field &redef;
|
const empty_field = Log::empty_field &redef;
|
||||||
|
|
||||||
## String to use for an unset &optional field.
|
## String to use for an unset &optional field.
|
||||||
|
|
|
@ -6,16 +6,16 @@ export {
|
||||||
## Compression to use with the DS output file. Options are:
|
## Compression to use with the DS output file. Options are:
|
||||||
##
|
##
|
||||||
## 'none' -- No compression.
|
## 'none' -- No compression.
|
||||||
## 'lzf' -- LZF compression. Very quick, but leads to larger output files.
|
## 'lzf' -- LZF compression (very quick, but leads to larger output files).
|
||||||
## 'lzo' -- LZO compression. Very fast decompression times.
|
## 'lzo' -- LZO compression (very fast decompression times).
|
||||||
## 'gz' -- GZIP compression. Slower than LZF, but also produces smaller output.
|
## 'gz' -- GZIP compression (slower than LZF, but also produces smaller output).
|
||||||
## 'bz2' -- BZIP2 compression. Slower than GZIP, but also produces smaller output.
|
## 'bz2' -- BZIP2 compression (slower than GZIP, but also produces smaller output).
|
||||||
const compression = "gz" &redef;
|
const compression = "gz" &redef;
|
||||||
|
|
||||||
## The extent buffer size.
|
## The extent buffer size.
|
||||||
## Larger values here lead to better compression and more efficient writes, but
|
## Larger values here lead to better compression and more efficient writes,
|
||||||
## also increase the lag between the time events are received and the time they
|
## but also increase the lag between the time events are received and
|
||||||
## are actually written to disk.
|
## the time they are actually written to disk.
|
||||||
const extent_size = 65536 &redef;
|
const extent_size = 65536 &redef;
|
||||||
|
|
||||||
## Should we dump the XML schema we use for this DS file to disk?
|
## Should we dump the XML schema we use for this DS file to disk?
|
||||||
|
@ -43,8 +43,8 @@ export {
|
||||||
}
|
}
|
||||||
|
|
||||||
# Default function to postprocess a rotated DataSeries log file. It moves the
|
# Default function to postprocess a rotated DataSeries log file. It moves the
|
||||||
# rotated file to a new name that includes a timestamp with the opening time, and
|
# rotated file to a new name that includes a timestamp with the opening time,
|
||||||
# then runs the writer's default postprocessor command on it.
|
# and then runs the writer's default postprocessor command on it.
|
||||||
function default_rotation_postprocessor_func(info: Log::RotationInfo) : bool
|
function default_rotation_postprocessor_func(info: Log::RotationInfo) : bool
|
||||||
{
|
{
|
||||||
# Move file to name including both opening and closing time.
|
# Move file to name including both opening and closing time.
|
||||||
|
|
|
@ -10,16 +10,16 @@
|
||||||
module LogElasticSearch;
|
module LogElasticSearch;
|
||||||
|
|
||||||
export {
|
export {
|
||||||
## Name of the ES cluster
|
## Name of the ES cluster.
|
||||||
const cluster_name = "elasticsearch" &redef;
|
const cluster_name = "elasticsearch" &redef;
|
||||||
|
|
||||||
## ES Server
|
## ES server.
|
||||||
const server_host = "127.0.0.1" &redef;
|
const server_host = "127.0.0.1" &redef;
|
||||||
|
|
||||||
## ES Port
|
## ES port.
|
||||||
const server_port = 9200 &redef;
|
const server_port = 9200 &redef;
|
||||||
|
|
||||||
## Name of the ES index
|
## Name of the ES index.
|
||||||
const index_prefix = "bro" &redef;
|
const index_prefix = "bro" &redef;
|
||||||
|
|
||||||
## The ES type prefix comes before the name of the related log.
|
## The ES type prefix comes before the name of the related log.
|
||||||
|
@ -27,9 +27,9 @@ export {
|
||||||
const type_prefix = "" &redef;
|
const type_prefix = "" &redef;
|
||||||
|
|
||||||
## The time before an ElasticSearch transfer will timeout. Note that
|
## The time before an ElasticSearch transfer will timeout. Note that
|
||||||
## the fractional part of the timeout will be ignored. In particular, time
|
## the fractional part of the timeout will be ignored. In particular,
|
||||||
## specifications less than a second result in a timeout value of 0, which
|
## time specifications less than a second result in a timeout value of
|
||||||
## means "no timeout."
|
## 0, which means "no timeout."
|
||||||
const transfer_timeout = 2secs;
|
const transfer_timeout = 2secs;
|
||||||
|
|
||||||
## The batch size is the number of messages that will be queued up before
|
## The batch size is the number of messages that will be queued up before
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
##! Interface for the None log writer. Thiis writer is mainly for debugging.
|
##! Interface for the None log writer. This writer is mainly for debugging.
|
||||||
|
|
||||||
module LogNone;
|
module LogNone;
|
||||||
|
|
||||||
|
|
|
@ -11,7 +11,7 @@ export {
|
||||||
const unset_field = Log::unset_field &redef;
|
const unset_field = Log::unset_field &redef;
|
||||||
|
|
||||||
## String to use for empty fields. This should be different from
|
## String to use for empty fields. This should be different from
|
||||||
## *unset_field* to make the output non-ambigious.
|
## *unset_field* to make the output unambiguous.
|
||||||
const empty_field = Log::empty_field &redef;
|
const empty_field = Log::empty_field &redef;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
1
scripts/base/frameworks/packet-filter/README
Normal file
|
@ -0,0 +1 @@
|
||||||
|
The packet filter framework supports how Bro sets its BPF capture filter.
|
|
@ -1,4 +1,4 @@
|
||||||
##! This script supports how Bro sets it's BPF capture filter. By default
|
##! This script supports how Bro sets its BPF capture filter. By default
|
||||||
##! Bro sets a capture filter that allows all traffic. If a filter
|
##! Bro sets a capture filter that allows all traffic. If a filter
|
||||||
##! is set on the command line, that filter takes precedence over the default
|
##! is set on the command line, that filter takes precedence over the default
|
||||||
##! open filter and all filters defined in Bro scripts with the
|
##! open filter and all filters defined in Bro scripts with the
|
||||||
|
@ -19,7 +19,7 @@ export {
|
||||||
## This notice is generated if a packet filter cannot be compiled.
|
## This notice is generated if a packet filter cannot be compiled.
|
||||||
Compile_Failure,
|
Compile_Failure,
|
||||||
|
|
||||||
## Generated if a packet filter is fails to install.
|
## Generated if a packet filter fails to install.
|
||||||
Install_Failure,
|
Install_Failure,
|
||||||
|
|
||||||
## Generated when a notice takes too long to compile.
|
## Generated when a notice takes too long to compile.
|
||||||
|
@ -33,8 +33,8 @@ export {
|
||||||
ts: time &log;
|
ts: time &log;
|
||||||
|
|
||||||
## This is a string representation of the node that applied this
|
## This is a string representation of the node that applied this
|
||||||
## packet filter. It's mostly useful in the context of dynamically
|
## packet filter. It's mostly useful in the context of
|
||||||
## changing filters on clusters.
|
## dynamically changing filters on clusters.
|
||||||
node: string &log &optional;
|
node: string &log &optional;
|
||||||
|
|
||||||
## The packet filter that is being set.
|
## The packet filter that is being set.
|
||||||
|
@ -48,27 +48,28 @@ export {
|
||||||
};
|
};
|
||||||
|
|
||||||
## The BPF filter that is used by default to define what traffic should
|
## The BPF filter that is used by default to define what traffic should
|
||||||
## be captured. Filters defined in :bro:id:`restrict_filters` will still
|
## be captured. Filters defined in :bro:id:`restrict_filters` will
|
||||||
## be applied to reduce the captured traffic.
|
## still be applied to reduce the captured traffic.
|
||||||
const default_capture_filter = "ip or not ip" &redef;
|
const default_capture_filter = "ip or not ip" &redef;
|
||||||
|
|
||||||
## Filter string which is unconditionally or'ed to the beginning of every
|
## Filter string which is unconditionally or'ed to the beginning of
|
||||||
## dynamically built filter.
|
## every dynamically built filter.
|
||||||
const unrestricted_filter = "" &redef;
|
const unrestricted_filter = "" &redef;
|
||||||
|
|
||||||
## Filter string which is unconditionally and'ed to the beginning of every
|
## Filter string which is unconditionally and'ed to the beginning of
|
||||||
## dynamically built filter. This is mostly used when a custom filter is being
|
## every dynamically built filter. This is mostly used when a custom
|
||||||
## used but MPLS or VLAN tags are on the traffic.
|
## filter is being used but MPLS or VLAN tags are on the traffic.
|
||||||
const restricted_filter = "" &redef;
|
const restricted_filter = "" &redef;
|
||||||
|
|
||||||
## The maximum amount of time that you'd like to allow for BPF filters to compile.
|
## The maximum amount of time that you'd like to allow for BPF filters to compile.
|
||||||
## If this time is exceeded, compensation measures may be taken by the framework
|
## If this time is exceeded, compensation measures may be taken by the framework
|
||||||
## to reduce the filter size. This threshold being crossed also results in
|
## to reduce the filter size. This threshold being crossed also results
|
||||||
## the :bro:see:`PacketFilter::Too_Long_To_Compile_Filter` notice.
|
## in the :bro:see:`PacketFilter::Too_Long_To_Compile_Filter` notice.
|
||||||
const max_filter_compile_time = 100msec &redef;
|
const max_filter_compile_time = 100msec &redef;
|
||||||
|
|
||||||
## Install a BPF filter to exclude some traffic. The filter should positively
|
## Install a BPF filter to exclude some traffic. The filter should
|
||||||
## match what is to be excluded, it will be wrapped in a "not".
|
## positively match what is to be excluded, it will be wrapped in
|
||||||
|
## a "not".
|
||||||
##
|
##
|
||||||
## filter_id: An arbitrary string that can be used to identify
|
## filter_id: An arbitrary string that can be used to identify
|
||||||
## the filter.
|
## the filter.
|
||||||
|
@ -79,9 +80,9 @@ export {
|
||||||
## installed or not.
|
## installed or not.
|
||||||
global exclude: function(filter_id: string, filter: string): bool;
|
global exclude: function(filter_id: string, filter: string): bool;
|
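
A minimal usage sketch (the filter id and address are illustrative): note the
filter positively matches the traffic to drop, and the framework wraps it in a
"not".

.. code:: bro

    event bro_init()
        {
        # Stop capturing traffic for a noisy backup host.
        PacketFilter::exclude("noisy-backup-host", "host 10.0.0.99");
        }
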
||||||
|
|
||||||
## Install a temporary filter to traffic which should not be passed through
|
## Install a temporary filter to traffic which should not be passed
|
||||||
## the BPF filter. The filter should match the traffic you don't want
|
## through the BPF filter. The filter should match the traffic you
|
||||||
## to see (it will be wrapped in a "not" condition).
|
## don't want to see (it will be wrapped in a "not" condition).
|
||||||
##
|
##
|
||||||
## filter_id: An arbitrary string that can be used to identify
|
## filter_id: An arbitrary string that can be used to identify
|
||||||
## the filter.
|
## the filter.
|
||||||
|
@ -125,7 +126,7 @@ global dynamic_restrict_filters: table[string] of string = {};
|
||||||
# install the filter.
|
# install the filter.
|
||||||
global currently_building = F;
|
global currently_building = F;
|
||||||
|
|
||||||
# Internal tracking for if the the filter being built has possibly been changed.
|
# Internal tracking for if the filter being built has possibly been changed.
|
||||||
global filter_changed = F;
|
global filter_changed = F;
|
||||||
|
|
||||||
global filter_plugins: set[FilterPlugin] = {};
|
global filter_plugins: set[FilterPlugin] = {};
|
||||||
|
|
|
@ -13,7 +13,7 @@ export {
|
||||||
##
|
##
|
||||||
## num_parts: The number of parts the traffic should be split into.
|
## num_parts: The number of parts the traffic should be split into.
|
||||||
##
|
##
|
||||||
## this_part: The part of the traffic this filter will accept. 0-based.
|
## this_part: The part of the traffic this filter will accept (0-based).
|
||||||
global sampling_filter: function(num_parts: count, this_part: count): string;
|
global sampling_filter: function(num_parts: count, this_part: count): string;
|
||||||
|
|
||||||
## Combines two valid BPF filter strings with a string based operator
|
## Combines two valid BPF filter strings with a string based operator
|
||||||
|
|
|
@ -7,7 +7,7 @@
|
||||||
##! :bro:see:`Reporter::errors_to_stderr`.
|
##! :bro:see:`Reporter::errors_to_stderr`.
|
||||||
##!
|
##!
|
||||||
##! Note that this framework deals with the handling of internally generated
|
##! Note that this framework deals with the handling of internally generated
|
||||||
##! reporter messages, for the interface in to actually creating interface
|
##! reporter messages; for the interface
|
||||||
##! into actually creating reporter messages from the scripting layer, use
|
##! into actually creating reporter messages from the scripting layer, use
|
||||||
##! the built-in functions in :doc:`/scripts/base/bif/reporter.bif`.
|
##! the built-in functions in :doc:`/scripts/base/bif/reporter.bif`.
|
||||||
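
A minimal handler sketch for this event; the diagnostic message is illustrative.

.. code:: bro

    event InputRaw::process_finished(name: string, source: string,
                                     exit_code: count, signal_exit: bool)
        {
        if ( exit_code != 0 || signal_exit )
            print fmt("input child for %s (%s) exited abnormally",
                      name, source);
        }
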
|
|
||||||
|
|
4
scripts/base/frameworks/software/README
Normal file
|
@ -0,0 +1,4 @@
|
||||||
|
The software framework provides infrastructure for maintaining a table
|
||||||
|
of software versions seen on the network. The version parsing itself
|
||||||
|
is carried out by external protocol-specific scripts that feed into
|
||||||
|
this framework.
|
|
@ -1,5 +1,5 @@
|
||||||
##! This script provides the framework for software version detection and
|
##! This script provides the framework for software version detection and
|
||||||
##! parsing but doesn't actually do any detection on it's own. It relys on
|
##! parsing but doesn't actually do any detection on its own. It relies on
|
||||||
##! other protocol specific scripts to parse out software from the protocols
|
##! other protocol specific scripts to parse out software from the protocols
|
||||||
##! that they analyze. The entry point for providing new software detections
|
##! that they analyze. The entry point for providing new software detections
|
||||||
##! to this framework is through the :bro:id:`Software::found` function.
|
##! to this framework is through the :bro:id:`Software::found` function.
|
||||||
|
@ -23,15 +23,15 @@ export {
|
||||||
|
|
||||||
## A structure to represent the numeric version of software.
|
## A structure to represent the numeric version of software.
|
||||||
type Version: record {
|
type Version: record {
|
||||||
## Major version number
|
## Major version number.
|
||||||
major: count &optional;
|
major: count &optional;
|
||||||
## Minor version number
|
## Minor version number.
|
||||||
minor: count &optional;
|
minor: count &optional;
|
||||||
## Minor subversion number
|
## Minor subversion number.
|
||||||
minor2: count &optional;
|
minor2: count &optional;
|
||||||
## Minor updates number
|
## Minor updates number.
|
||||||
minor3: count &optional;
|
minor3: count &optional;
|
||||||
## Additional version string (e.g. "beta42")
|
## Additional version string (e.g. "beta42").
|
||||||
addl: string &optional;
|
addl: string &optional;
|
||||||
} &log;
|
} &log;
|
||||||
|
|
||||||
|
@ -41,7 +41,8 @@ export {
|
||||||
ts: time &log &optional;
|
ts: time &log &optional;
|
||||||
## The IP address detected running the software.
|
## The IP address detected running the software.
|
||||||
host: addr &log;
|
host: addr &log;
|
||||||
## The Port on which the software is running. Only sensible for server software.
|
## The port on which the software is running. Only sensible for
|
||||||
|
## server software.
|
||||||
host_p: port &log &optional;
|
host_p: port &log &optional;
|
||||||
## The type of software detected (e.g. :bro:enum:`HTTP::SERVER`).
|
## The type of software detected (e.g. :bro:enum:`HTTP::SERVER`).
|
||||||
software_type: Type &log &default=UNKNOWN;
|
software_type: Type &log &default=UNKNOWN;
|
||||||
|
@ -49,9 +50,9 @@ export {
|
||||||
name: string &log &optional;
|
name: string &log &optional;
|
||||||
## Version of the software.
|
## Version of the software.
|
||||||
version: Version &log &optional;
|
version: Version &log &optional;
|
||||||
## The full unparsed version string found because the version parsing
|
## The full unparsed version string found because the version
|
||||||
## doesn't always work reliably in all cases and this acts as a
|
## parsing doesn't always work reliably in all cases and this
|
||||||
## fallback in the logs.
|
## acts as a fallback in the logs.
|
||||||
unparsed_version: string &log &optional;
|
unparsed_version: string &log &optional;
|
||||||
|
|
||||||
## This can indicate that this software being detected should
|
## This can indicate that this software being detected should
|
||||||
|
@ -59,13 +60,13 @@ export {
|
||||||
## default, only software that is "interesting" due to a change
|
## default, only software that is "interesting" due to a change
|
||||||
## in version or it being currently unknown is sent to the
|
## in version or it being currently unknown is sent to the
|
||||||
## logging framework. This can be set to T to force the record
|
## logging framework. This can be set to T to force the record
|
||||||
## to be sent to the logging framework if some amount of this tracking
|
## to be sent to the logging framework if some amount of this
|
||||||
## needs to happen in a specific way to the software.
|
## tracking needs to happen in a specific way for this software.
|
||||||
force_log: bool &default=F;
|
force_log: bool &default=F;
|
||||||
};
|
};
|
||||||
|
|
||||||
## Hosts whose software should be detected and tracked.
|
## Hosts whose software should be detected and tracked.
|
||||||
## Choices are: LOCAL_HOSTS, REMOTE_HOSTS, ALL_HOSTS, NO_HOSTS
|
## Choices are: LOCAL_HOSTS, REMOTE_HOSTS, ALL_HOSTS, NO_HOSTS.
|
||||||
const asset_tracking = LOCAL_HOSTS &redef;
|
const asset_tracking = LOCAL_HOSTS &redef;
|
||||||
|
|
||||||
## Other scripts should call this function when they detect software.
|
## Other scripts should call this function when they detect software.
|
||||||
|
@ -79,14 +80,14 @@ export {
|
||||||
## Compare two version records.
|
## Compare two version records.
|
||||||
##
|
##
|
||||||
## Returns: -1 for v1 < v2, 0 for v1 == v2, 1 for v1 > v2.
|
## Returns: -1 for v1 < v2, 0 for v1 == v2, 1 for v1 > v2.
|
||||||
## If the numerical version numbers match, the addl string
|
## If the numerical version numbers match, the *addl* string
|
||||||
## is compared lexicographically.
|
## is compared lexicographically.
|
||||||
global cmp_versions: function(v1: Version, v2: Version): int;
|
global cmp_versions: function(v1: Version, v2: Version): int;
|
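For illustration, a minimal sketch of calling this function (the version values are hypothetical):

.. code:: bro

    event bro_init()
        {
        local v1: Software::Version = [$major=1, $minor=2, $addl="beta42"];
        local v2: Software::Version = [$major=1, $minor=3];
        # Prints -1, since 1.2-beta42 sorts before 1.3.
        print Software::cmp_versions(v1, v2);
        }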
||||||
|
|
||||||
## Type to represent a collection of :bro:type:`Software::Info` records.
|
## Type to represent a collection of :bro:type:`Software::Info` records.
|
||||||
## It's indexed with the name of a piece of software such as "Firefox"
|
## It's indexed with the name of a piece of software such as "Firefox"
|
||||||
## and it yields a :bro:type:`Software::Info` record with more information
|
## and it yields a :bro:type:`Software::Info` record with more
|
||||||
## about the software.
|
## information about the software.
|
||||||
type SoftwareSet: table[string] of Info;
|
type SoftwareSet: table[string] of Info;
|
||||||
|
|
||||||
## The set of software associated with an address. Data expires from
|
## The set of software associated with an address. Data expires from
|
||||||
|
|
2
scripts/base/frameworks/sumstats/README
Normal file
|
@ -0,0 +1,2 @@
|
||||||
|
The summary statistics framework provides a way to summarize large streams
|
||||||
|
of data into simple reduced measurements.
|
|
@ -1,6 +1,6 @@
|
||||||
##! This implements transparent cluster support for the SumStats framework.
|
##! This implements transparent cluster support for the SumStats framework.
|
||||||
##! Do not load this file directly. It's only meant to be loaded automatically
|
##! Do not load this file directly. It's only meant to be loaded automatically
|
||||||
##! and will be depending on if the cluster framework has been enabled.
|
##! and will be if the cluster framework has been enabled.
|
||||||
##! The goal of this script is to make sumstats calculation completely and
|
##! The goal of this script is to make sumstats calculation completely and
|
||||||
##! transparently automated when running on a cluster.
|
##! transparently automated when running on a cluster.
|
||||||
|
|
||||||
|
@ -10,31 +10,32 @@
|
||||||
module SumStats;
|
module SumStats;
|
||||||
|
|
||||||
export {
|
export {
|
||||||
## The percent of the full threshold value that needs to be met on a single worker
|
## The percent of the full threshold value that needs to be met on a
|
||||||
## for that worker to send the value to its manager in order for it to request a
|
## single worker for that worker to send the value to its manager in
|
||||||
## global view for that value. There is no requirement that the manager requests
|
## order for it to request a global view for that value. There is no
|
||||||
## a global view for the key since it may opt not to if it requested a global view
|
## requirement that the manager requests a global view for the key since
|
||||||
## for the key recently.
|
## it may opt not to if it requested a global view for the key recently.
|
||||||
const cluster_request_global_view_percent = 0.2 &redef;
|
const cluster_request_global_view_percent = 0.2 &redef;
|
||||||
|
|
||||||
## This is to deal with intermediate update overload. A manager will only allow
|
## This is to deal with intermediate update overload. A manager will
|
||||||
## this many intermediate update requests to the workers to be inflight at any
|
## only allow this many intermediate update requests to the workers to
|
||||||
## given time. Requested intermediate updates are currently thrown out and not
|
## be in flight at any given time. Requested intermediate updates are
|
||||||
## performed. In practice this should hopefully have a minimal effect.
|
## currently thrown out and not performed. In practice this should
|
||||||
|
## hopefully have a minimal effect.
|
||||||
const max_outstanding_global_views = 10 &redef;
|
const max_outstanding_global_views = 10 &redef;
|
||||||
|
|
||||||
## Event sent by the manager in a cluster to initiate the collection of values for
|
## Event sent by the manager in a cluster to initiate the collection of
|
||||||
## a sumstat.
|
## values for a sumstat.
|
||||||
global cluster_ss_request: event(uid: string, ss_name: string, cleanup: bool);
|
global cluster_ss_request: event(uid: string, ss_name: string, cleanup: bool);
|
||||||
|
|
||||||
## Event sent by nodes that are collecting sumstats after receiving a request for
|
## Event sent by nodes that are collecting sumstats after receiving a
|
||||||
## the sumstat from the manager.
|
## request for the sumstat from the manager.
|
||||||
#global cluster_ss_response: event(uid: string, ss_name: string, data: ResultTable, done: bool, cleanup: bool);
|
#global cluster_ss_response: event(uid: string, ss_name: string, data: ResultTable, done: bool, cleanup: bool);
|
||||||
|
|
||||||
## This event is sent by the manager in a cluster to initiate the collection of
|
## This event is sent by the manager in a cluster to initiate the
|
||||||
## a single key value from a sumstat. It's typically used to get intermediate
|
## collection of a single key value from a sumstat. It's typically used
|
||||||
## updates before the break interval triggers to speed detection of a value
|
## to get intermediate updates before the break interval triggers to
|
||||||
## crossing a threshold.
|
## speed detection of a value crossing a threshold.
|
||||||
global cluster_get_result: event(uid: string, ss_name: string, key: Key, cleanup: bool);
|
global cluster_get_result: event(uid: string, ss_name: string, key: Key, cleanup: bool);
|
||||||
|
|
||||||
## This event is sent by nodes in response to a
|
## This event is sent by nodes in response to a
|
||||||
|
@ -43,7 +44,7 @@ export {
|
||||||
|
|
||||||
## This is sent by workers to indicate that they crossed the percent
|
## This is sent by workers to indicate that they crossed the percent
|
||||||
## of the current threshold by the percentage defined globally in
|
## of the current threshold by the percentage defined globally in
|
||||||
## :bro:id:`SumStats::cluster_request_global_view_percent`
|
## :bro:id:`SumStats::cluster_request_global_view_percent`.
|
||||||
global cluster_key_intermediate_response: event(ss_name: string, key: SumStats::Key);
|
global cluster_key_intermediate_response: event(ss_name: string, key: SumStats::Key);
|
||||||
|
|
||||||
## This event is scheduled internally on workers to send result chunks.
|
## This event is scheduled internally on workers to send result chunks.
|
||||||
|
|
|
@ -51,8 +51,8 @@ export {
|
||||||
## would like to accept the data being inserted.
|
## would like to accept the data being inserted.
|
||||||
pred: function(key: SumStats::Key, obs: SumStats::Observation): bool &optional;
|
pred: function(key: SumStats::Key, obs: SumStats::Observation): bool &optional;
|
||||||
|
|
||||||
## A function to normalize the key. This can be used to aggregate or
|
## A function to normalize the key. This can be used to
|
||||||
## normalize the entire key.
|
## aggregate or normalize the entire key.
|
||||||
normalize_key: function(key: SumStats::Key): Key &optional;
|
normalize_key: function(key: SumStats::Key): Key &optional;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -91,28 +91,28 @@ export {
|
||||||
name: string;
|
name: string;
|
||||||
|
|
||||||
## The interval at which this filter should be "broken"
|
## The interval at which this filter should be "broken"
|
||||||
## and the '$epoch_result' callback called. The
|
## and the *epoch_result* callback called. The
|
||||||
## results are also reset at this time so any threshold
|
## results are also reset at this time so any threshold
|
||||||
## based detection needs to be set to a
|
## based detection needs to be set to a
|
||||||
## value that should be expected to happen within
|
## value that should be expected to happen within
|
||||||
## this epoch.
|
## this epoch.
|
||||||
epoch: interval;
|
epoch: interval;
|
||||||
|
|
||||||
## The reducers for the SumStat
|
## The reducers for the SumStat.
|
||||||
reducers: set[Reducer];
|
reducers: set[Reducer];
|
||||||
|
|
||||||
## Provide a function to calculate a value from the
|
## Provide a function to calculate a value from the
|
||||||
## :bro:see:`SumStats::Result` structure which will be used
|
## :bro:see:`SumStats::Result` structure which will be used
|
||||||
## for thresholding.
|
## for thresholding.
|
||||||
## This is required if a $threshold value is given.
|
## This is required if a *threshold* value is given.
|
||||||
threshold_val: function(key: SumStats::Key, result: SumStats::Result): double &optional;
|
threshold_val: function(key: SumStats::Key, result: SumStats::Result): double &optional;
|
||||||
|
|
||||||
## The threshold value for calling the
|
## The threshold value for calling the
|
||||||
## $threshold_crossed callback.
|
## *threshold_crossed* callback.
|
||||||
threshold: double &optional;
|
threshold: double &optional;
|
||||||
|
|
||||||
## A series of thresholds for calling the
|
## A series of thresholds for calling the
|
||||||
## $threshold_crossed callback.
|
## *threshold_crossed* callback.
|
||||||
threshold_series: vector of double &optional;
|
threshold_series: vector of double &optional;
|
||||||
|
|
||||||
## A callback that is called when a threshold is crossed.
|
## A callback that is called when a threshold is crossed.
|
||||||
|
@ -124,7 +124,7 @@ export {
|
||||||
epoch_result: function(ts: time, key: SumStats::Key, result: SumStats::Result) &optional;
|
epoch_result: function(ts: time, key: SumStats::Key, result: SumStats::Result) &optional;
|
||||||
|
|
||||||
## A callback that will be called when a single collection
|
## A callback that will be called when a single collection
|
||||||
## interval is completed. The ts value will be the time of
|
## interval is completed. The *ts* value will be the time of
|
||||||
## when the collection started.
|
## when the collection started.
|
||||||
epoch_finished: function(ts:time) &optional;
|
epoch_finished: function(ts:time) &optional;
|
||||||
};
|
};
|
||||||
|
|
|
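To make the callback flow concrete, a minimal sketch of wiring a reducer into a SumStat (the stream name, epoch, and use of connection_attempt are illustrative, not taken from this script):

.. code:: bro

    event connection_attempt(c: connection)
        {
        SumStats::observe("conn.attempted", [$host=c$id$orig_h], [$num=1]);
        }

    event bro_init()
        {
        local r1: SumStats::Reducer = [$stream="conn.attempted",
                                       $apply=set(SumStats::SUM)];
        SumStats::create([$name="counting-attempts",
                          $epoch=5min,
                          $reducers=set(r1),
                          $epoch_result(ts: time, key: SumStats::Key, result: SumStats::Result) =
                              {
                              print fmt("%s attempted %.0f connections",
                                        key$host, result["conn.attempted"]$sum);
                              }]);
        }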
@ -5,12 +5,12 @@ module SumStats;
|
||||||
|
|
||||||
export {
|
export {
|
||||||
redef enum Calculation += {
|
redef enum Calculation += {
|
||||||
## Keep last X observations in a queue
|
## Keep last X observations in a queue.
|
||||||
LAST
|
LAST
|
||||||
};
|
};
|
||||||
|
|
||||||
redef record Reducer += {
|
redef record Reducer += {
|
||||||
## number of elements to keep.
|
## Number of elements to keep.
|
||||||
num_last_elements: count &default=0;
|
num_last_elements: count &default=0;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
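A sketch of a reducer using this calculation (stream name hypothetical):

.. code:: bro

    event bro_init()
        {
        # Keep the last ten observations per key.
        local r1: SumStats::Reducer = [$stream="ssh.login.failure",
                                       $apply=set(SumStats::LAST),
                                       $num_last_elements=10];
        }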
|
@ -4,7 +4,8 @@ module SumStats;
|
||||||
|
|
||||||
export {
|
export {
|
||||||
redef enum Calculation += {
|
redef enum Calculation += {
|
||||||
## Get uniquely distributed random samples from the observation stream.
|
## Get uniquely distributed random samples from the observation
|
||||||
|
## stream.
|
||||||
SAMPLE
|
SAMPLE
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -24,8 +25,8 @@ export {
|
||||||
|
|
||||||
redef record ResultVal += {
|
redef record ResultVal += {
|
||||||
# Internal use only. This is not meant to be publically available
|
# Internal use only. This is not meant to be publically available
|
||||||
# and just a copy of num_samples from the Reducer. Needed for availability
|
# and just a copy of num_samples from the Reducer. Needed for
|
||||||
# in the compose hook.
|
# availability in the compose hook.
|
||||||
num_samples: count &default=0;
|
num_samples: count &default=0;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
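A sketch of a reducer using this calculation (stream name hypothetical):

.. code:: bro

    event bro_init()
        {
        # Keep up to five uniformly distributed random observations per key.
        local r1: SumStats::Reducer = [$stream="http.request.uri",
                                       $apply=set(SumStats::SAMPLE),
                                       $num_samples=5];
        }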
|
@ -4,7 +4,7 @@ module SumStats;
|
||||||
|
|
||||||
export {
|
export {
|
||||||
redef record Reducer += {
|
redef record Reducer += {
|
||||||
## number of elements to keep in the top-k list
|
## Number of elements to keep in the top-k list.
|
||||||
topk_size: count &default=500;
|
topk_size: count &default=500;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
|
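A sketch of a reducer using this calculation (stream name hypothetical):

.. code:: bro

    event bro_init()
        {
        # Track (approximately) the 100 most frequent values per key.
        local r1: SumStats::Reducer = [$stream="dns.lookup",
                                       $apply=set(SumStats::TOPK),
                                       $topk_size=100];
        }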
@ -28,7 +28,7 @@ redef record ResultVal += {
|
||||||
|
|
||||||
# Internal use only. This is not meant to be publically available
|
# Internal use only. This is not meant to be publicly available
|
||||||
# because we don't want to trust that we can inspect the values
|
# because we don't want to trust that we can inspect the values
|
||||||
# since we will like move to a probalistic data structure in the future.
|
# since we will likely move to a probabilistic data structure in the future.
|
||||||
# TODO: in the future this will optionally be a hyperloglog structure
|
# TODO: in the future this will optionally be a hyperloglog structure
|
||||||
unique_vals: set[Observation] &optional;
|
unique_vals: set[Observation] &optional;
|
||||||
};
|
};
|
||||||
|
|
2
scripts/base/frameworks/tunnels/README
Normal file
|
@ -0,0 +1,2 @@
|
||||||
|
The tunnels framework handles the tracking/logging of tunnels (e.g. Teredo,
|
||||||
|
AYIYA, or IP-in-IP such as 6to4 where "IP" is either IPv4 or IPv6).
|
|
@ -29,8 +29,8 @@ export {
|
||||||
## The unique identifier for the tunnel, which may correspond
|
## The unique identifier for the tunnel, which may correspond
|
||||||
## to a :bro:type:`connection`'s *uid* field for non-IP-in-IP tunnels.
|
## to a :bro:type:`connection`'s *uid* field for non-IP-in-IP tunnels.
|
||||||
## This is optional because there could be numerous connections
|
## This is optional because there could be numerous connections
|
||||||
## for payload proxies like SOCKS but we should treat it as a single
|
## for payload proxies like SOCKS but we should treat it as a
|
||||||
## tunnel.
|
## single tunnel.
|
||||||
uid: string &log &optional;
|
uid: string &log &optional;
|
||||||
## The tunnel "connection" 4-tuple of endpoint addresses/ports.
|
## The tunnel "connection" 4-tuple of endpoint addresses/ports.
|
||||||
## For an IP tunnel, the ports will be 0.
|
## For an IP tunnel, the ports will be 0.
|
||||||
|
@ -76,8 +76,8 @@ export {
|
||||||
## connections before it is considered inactive/expired.
|
## connections before it is considered inactive/expired.
|
||||||
const expiration_interval = 1hrs &redef;
|
const expiration_interval = 1hrs &redef;
|
||||||
|
|
||||||
## Currently active tunnels. That is, tunnels for which new, encapsulated
|
## Currently active tunnels. That is, tunnels for which new,
|
||||||
## connections have been seen in the interval indicated by
|
## encapsulated connections have been seen in the interval indicated by
|
||||||
## :bro:see:`Tunnel::expiration_interval`.
|
## :bro:see:`Tunnel::expiration_interval`.
|
||||||
global active: table[conn_id] of Info = table() &read_expire=expiration_interval &expire_func=expire;
|
global active: table[conn_id] of Info = table() &read_expire=expiration_interval &expire_func=expire;
|
||||||
}
|
}
|
||||||
|
|
|
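Both knobs above are redef-able; a minimal sketch of tuning them:

.. code:: bro

    # Consider tunnels inactive after 30 minutes instead of one hour.
    redef Tunnel::expiration_interval = 30mins;

    event bro_done()
        {
        # The size operator gives the number of still-active tunnels.
        print fmt("%d active tunnels at shutdown", |Tunnel::active|);
        }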
@ -888,13 +888,13 @@ const tcp_excessive_data_without_further_acks = 10 * 1024 * 1024 &redef;
|
||||||
## For services without an a handler, these sets define originator-side ports that
|
## For services without a handler, these sets define originator-side ports that
|
||||||
## still trigger reassembly.
|
## still trigger reassembly.
|
||||||
##
|
##
|
||||||
## .. :bro:see:: tcp_reassembler_ports_resp
|
## .. bro:see:: tcp_reassembler_ports_resp
|
||||||
const tcp_reassembler_ports_orig: set[port] = {} &redef;
|
const tcp_reassembler_ports_orig: set[port] = {} &redef;
|
||||||
|
|
||||||
## For services without an a handler, these sets define responder-side ports that
|
## For services without a handler, these sets define responder-side ports that
|
||||||
## still trigger reassembly.
|
## still trigger reassembly.
|
||||||
##
|
##
|
||||||
## .. :bro:see:: tcp_reassembler_ports_orig
|
## .. bro:see:: tcp_reassembler_ports_orig
|
||||||
const tcp_reassembler_ports_resp: set[port] = {} &redef;
|
const tcp_reassembler_ports_resp: set[port] = {} &redef;
|
||||||
|
|
||||||
## Defines destination TCP ports for which the contents of the originator stream
|
## Defines destination TCP ports for which the contents of the originator stream
|
||||||
|
@ -987,7 +987,7 @@ const table_incremental_step = 5000 &redef;
|
||||||
## When expiring table entries, wait this amount of time before checking the next
|
## When expiring table entries, wait this amount of time before checking the next
|
||||||
## chunk of entries.
|
## chunk of entries.
|
||||||
##
|
##
|
||||||
## .. :bro:see:: table_expire_interval table_incremental_step
|
## .. bro:see:: table_expire_interval table_incremental_step
|
||||||
const table_expire_delay = 0.01 secs &redef;
|
const table_expire_delay = 0.01 secs &redef;
|
||||||
|
|
||||||
## Time to wait before timing out a DNS request.
|
## Time to wait before timing out a DNS request.
|
||||||
|
@ -1676,7 +1676,7 @@ global secondary_filters: table[string] of event(filter: string, pkt: pkt_hdr)
|
||||||
|
|
||||||
## Maximum length of payload passed to discarder functions.
|
## Maximum length of payload passed to discarder functions.
|
||||||
##
|
##
|
||||||
## .. :bro:see:: discarder_check_tcp discarder_check_udp discarder_check_icmp
|
## .. bro:see:: discarder_check_tcp discarder_check_udp discarder_check_icmp
|
||||||
## discarder_check_ip
|
## discarder_check_ip
|
||||||
global discarder_maxlen = 128 &redef;
|
global discarder_maxlen = 128 &redef;
|
||||||
|
|
||||||
|
@ -1689,7 +1689,7 @@ global discarder_maxlen = 128 &redef;
|
||||||
##
|
##
|
||||||
## Returns: True if the packet should not be analyzed any further.
|
## Returns: True if the packet should not be analyzed any further.
|
||||||
##
|
##
|
||||||
## .. :bro:see:: discarder_check_tcp discarder_check_udp discarder_check_icmp
|
## .. bro:see:: discarder_check_tcp discarder_check_udp discarder_check_icmp
|
||||||
## discarder_maxlen
|
## discarder_maxlen
|
||||||
##
|
##
|
||||||
## .. note:: This is very low-level functionality and potentially expensive.
|
## .. note:: This is very low-level functionality and potentially expensive.
|
||||||
|
@ -1707,7 +1707,7 @@ global discarder_check_ip: function(p: pkt_hdr): bool;
|
||||||
##
|
##
|
||||||
## Returns: True if the packet should not be analyzed any further.
|
## Returns: True if the packet should not be analyzed any further.
|
||||||
##
|
##
|
||||||
## .. :bro:see:: discarder_check_ip discarder_check_udp discarder_check_icmp
|
## .. bro:see:: discarder_check_ip discarder_check_udp discarder_check_icmp
|
||||||
## discarder_maxlen
|
## discarder_maxlen
|
||||||
##
|
##
|
||||||
## .. note:: This is very low-level functionality and potentially expensive.
|
## .. note:: This is very low-level functionality and potentially expensive.
|
||||||
|
@ -1725,7 +1725,7 @@ global discarder_check_tcp: function(p: pkt_hdr, d: string): bool;
|
||||||
##
|
##
|
||||||
## Returns: True if the packet should not be analyzed any further.
|
## Returns: True if the packet should not be analyzed any further.
|
||||||
##
|
##
|
||||||
## .. :bro:see:: discarder_check_ip discarder_check_tcp discarder_check_icmp
|
## .. bro:see:: discarder_check_ip discarder_check_tcp discarder_check_icmp
|
||||||
## discarder_maxlen
|
## discarder_maxlen
|
||||||
##
|
##
|
||||||
## .. note:: This is very low-level functionality and potentially expensive.
|
## .. note:: This is very low-level functionality and potentially expensive.
|
||||||
|
@ -1741,7 +1741,7 @@ global discarder_check_udp: function(p: pkt_hdr, d: string): bool;
|
||||||
##
|
##
|
||||||
## Returns: True if the packet should not be analyzed any further.
|
## Returns: True if the packet should not be analyzed any further.
|
||||||
##
|
##
|
||||||
## .. :bro:see:: discarder_check_ip discarder_check_tcp discarder_check_udp
|
## .. bro:see:: discarder_check_ip discarder_check_tcp discarder_check_udp
|
||||||
## discarder_maxlen
|
## discarder_maxlen
|
||||||
##
|
##
|
||||||
## .. note:: This is very low-level functionality and potentially expensive.
|
## .. note:: This is very low-level functionality and potentially expensive.
|
||||||
|
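As a sketch of the idea (the monitored address is hypothetical), a discarder that skips analysis of IPv4 packets not involving a single host of interest:

.. code:: bro

    function discarder_check_ip(p: pkt_hdr): bool
        {
        if ( ! p?$ip )
            return F;   # Not IPv4; don't discard on this basis.
        # Returning T means the packet is not analyzed any further.
        return p$ip$src != 192.168.1.1 && p$ip$dst != 192.168.1.1;
        }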
@ -1935,11 +1935,11 @@ export {
|
||||||
## .. .. bro:see:: return_data_max return_data_first_only
|
## .. .. bro:see:: return_data_max return_data_first_only
|
||||||
const return_data = F &redef;
|
const return_data = F &redef;
|
||||||
|
|
||||||
## If bro:id:`NFS3::return_data` is true, how much data should be returned at
|
## If :bro:id:`NFS3::return_data` is true, how much data should be returned at
|
||||||
## most.
|
## most.
|
||||||
const return_data_max = 512 &redef;
|
const return_data_max = 512 &redef;
|
||||||
|
|
||||||
## If bro:id:`NFS3::return_data` is true, whether to *only* return data if the read
|
## If :bro:id:`NFS3::return_data` is true, whether to *only* return data if the read
|
||||||
## or write offset is 0, i.e., only return data for the beginning of the file.
|
## or write offset is 0, i.e., only return data for the beginning of the file.
|
||||||
const return_data_first_only = T &redef;
|
const return_data_first_only = T &redef;
|
||||||
|
|
||||||
|
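A sketch of enabling the data return described by these options:

.. code:: bro

    redef NFS3::return_data = T;
    # Return at most 1 KB per read/write, not only at offset 0.
    redef NFS3::return_data_max = 1024;
    redef NFS3::return_data_first_only = F;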
@ -2827,7 +2827,7 @@ const report_gaps_for_partial = F &redef;
|
||||||
## Flag to prevent Bro from exiting automatically when input is exhausted.
|
## Flag to prevent Bro from exiting automatically when input is exhausted.
|
||||||
## Normally Bro terminates when all packets sources have gone dry
|
## Normally Bro terminates when all packet sources have gone dry
|
||||||
## and communication isn't enabled. If this flag is set, Bro's main loop will
|
## and communication isn't enabled. If this flag is set, Bro's main loop will
|
||||||
## instead keep idleing until :bro:see::`terminate` is explicitly called.
|
## instead keep idling until :bro:see:`terminate` is explicitly called.
|
||||||
##
|
##
|
||||||
## This is mainly for testing purposes when termination behaviour needs to be
|
## This is mainly for testing purposes when termination behaviour needs to be
|
||||||
## controlled for reproducing results.
|
## controlled for reproducing results.
|
||||||
|
|
1
scripts/base/protocols/conn/README
Normal file
|
@ -0,0 +1 @@
|
||||||
|
Support for connection (TCP, UDP, or ICMP) analysis.
|
|
@ -16,12 +16,12 @@
|
||||||
module Conn;
|
module Conn;
|
||||||
|
|
||||||
export {
|
export {
|
||||||
## The prefix given to files containing extracted connections as they are
|
## The prefix given to files containing extracted connections as they
|
||||||
## opened on disk.
|
## are opened on disk.
|
||||||
const extraction_prefix = "contents" &redef;
|
const extraction_prefix = "contents" &redef;
|
||||||
|
|
||||||
## If this variable is set to ``T``, then all contents of all connections
|
## If this variable is set to ``T``, then all contents of all
|
||||||
## will be extracted.
|
## connections will be extracted.
|
||||||
const default_extract = F &redef;
|
const default_extract = F &redef;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
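A minimal sketch of turning on extraction for all connections (prefix value hypothetical):

.. code:: bro

    redef Conn::extraction_prefix = "conn-contents";
    redef Conn::default_extract = T;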
@ -1,7 +1,7 @@
|
||||||
##! This script manages the tracking/logging of general information regarding
|
##! This script manages the tracking/logging of general information regarding
|
||||||
##! TCP, UDP, and ICMP traffic. For UDP and ICMP, "connections" are to
|
##! TCP, UDP, and ICMP traffic. For UDP and ICMP, "connections" are to
|
||||||
##! be interpreted using flow semantics (sequence of packets from a source
|
##! be interpreted using flow semantics (sequence of packets from a source
|
||||||
##! host/post to a destination host/port). Further, ICMP "ports" are to
|
##! host/port to a destination host/port). Further, ICMP "ports" are to
|
||||||
##! be interpreted as the source port meaning the ICMP message type and
|
##! be interpreted as the source port meaning the ICMP message type and
|
||||||
##! the destination port being the ICMP message code.
|
##! the destination port being the ICMP message code.
|
||||||
|
|
||||||
|
@ -23,7 +23,7 @@ export {
|
||||||
id: conn_id &log;
|
id: conn_id &log;
|
||||||
## The transport layer protocol of the connection.
|
## The transport layer protocol of the connection.
|
||||||
proto: transport_proto &log;
|
proto: transport_proto &log;
|
||||||
## An identification of an application protocol being sent over the
|
## An identification of an application protocol being sent over
|
||||||
## the connection.
|
## the connection.
|
||||||
service: string &log &optional;
|
service: string &log &optional;
|
||||||
## How long the connection lasted. For 3-way or 4-way connection
|
## How long the connection lasted. For 3-way or 4-way connection
|
||||||
|
@ -31,9 +31,10 @@ export {
|
||||||
duration: interval &log &optional;
|
duration: interval &log &optional;
|
||||||
## The number of payload bytes the originator sent. For TCP
|
## The number of payload bytes the originator sent. For TCP
|
||||||
## this is taken from sequence numbers and might be inaccurate
|
## this is taken from sequence numbers and might be inaccurate
|
||||||
## (e.g., due to large connections)
|
## (e.g., due to large connections).
|
||||||
orig_bytes: count &log &optional;
|
orig_bytes: count &log &optional;
|
||||||
## The number of payload bytes the responder sent. See ``orig_bytes``.
|
## The number of payload bytes the responder sent. See
|
||||||
|
## *orig_bytes*.
|
||||||
resp_bytes: count &log &optional;
|
resp_bytes: count &log &optional;
|
||||||
|
|
||||||
## ========== ===============================================
|
## ========== ===============================================
|
||||||
|
@ -55,20 +56,20 @@ export {
|
||||||
## ========== ===============================================
|
## ========== ===============================================
|
||||||
conn_state: string &log &optional;
|
conn_state: string &log &optional;
|
||||||
|
|
||||||
## If the connection is originated locally, this value will be T. If
|
## If the connection is originated locally, this value will be T.
|
||||||
## it was originated remotely it will be F. In the case that the
|
## If it was originated remotely it will be F. In the case that
|
||||||
## :bro:id:`Site::local_nets` variable is undefined, this field will
|
## the :bro:id:`Site::local_nets` variable is undefined, this
|
||||||
## be left empty at all times.
|
## field will be left empty at all times.
|
||||||
local_orig: bool &log &optional;
|
local_orig: bool &log &optional;
|
||||||
|
|
||||||
## Indicates the number of bytes missed in content gaps, which is
|
## Indicates the number of bytes missed in content gaps, which
|
||||||
## representative of packet loss. A value other than zero will
|
## is representative of packet loss. A value other than zero
|
||||||
## normally cause protocol analysis to fail but some analysis may
|
## will normally cause protocol analysis to fail but some
|
||||||
## have been completed prior to the packet loss.
|
## analysis may have been completed prior to the packet loss.
|
||||||
missed_bytes: count &log &default=0;
|
missed_bytes: count &log &default=0;
|
||||||
|
|
||||||
## Records the state history of connections as a string of letters.
|
## Records the state history of connections as a string of
|
||||||
## The meaning of those letters is:
|
## letters. The meaning of those letters is:
|
||||||
##
|
##
|
||||||
## ====== ====================================================
|
## ====== ====================================================
|
||||||
## Letter Meaning
|
## Letter Meaning
|
||||||
|
@ -83,24 +84,25 @@ export {
|
||||||
## i inconsistent packet (e.g. SYN+RST bits both set)
|
## i inconsistent packet (e.g. SYN+RST bits both set)
|
||||||
## ====== ====================================================
|
## ====== ====================================================
|
||||||
##
|
##
|
||||||
## If the event comes from the originator, the letter is in upper-case; if it comes
|
## If the event comes from the originator, the letter is in
|
||||||
## from the responder, it's in lower-case. Multiple packets of the same type will
|
## upper-case; if it comes from the responder, it's in
|
||||||
## only be noted once (e.g. we only record one "d" in each direction, regardless of
|
## lower-case. Multiple packets of the same type will only be
|
||||||
## how many data packets were seen.)
|
## noted once (e.g. we only record one "d" in each direction,
|
||||||
|
## regardless of how many data packets were seen.)
|
||||||
history: string &log &optional;
|
history: string &log &optional;
|
||||||
## Number of packets that the originator sent.
|
## Number of packets that the originator sent.
|
||||||
## Only set if :bro:id:`use_conn_size_analyzer` = T
|
## Only set if :bro:id:`use_conn_size_analyzer` = T.
|
||||||
orig_pkts: count &log &optional;
|
orig_pkts: count &log &optional;
|
||||||
## Number of IP level bytes that the originator sent (as seen on the wire,
|
## Number of IP level bytes that the originator sent (as seen on
|
||||||
## taken from IP total_length header field).
|
## the wire, taken from the IP total_length header field).
|
||||||
## Only set if :bro:id:`use_conn_size_analyzer` = T
|
## Only set if :bro:id:`use_conn_size_analyzer` = T.
|
||||||
orig_ip_bytes: count &log &optional;
|
orig_ip_bytes: count &log &optional;
|
||||||
## Number of packets that the responder sent.
|
## Number of packets that the responder sent.
|
||||||
## Only set if :bro:id:`use_conn_size_analyzer` = T
|
## Only set if :bro:id:`use_conn_size_analyzer` = T.
|
||||||
resp_pkts: count &log &optional;
|
resp_pkts: count &log &optional;
|
||||||
## Number og IP level bytes that the responder sent (as seen on the wire,
|
## Number of IP level bytes that the responder sent (as seen on
|
||||||
## taken from IP total_length header field).
|
## the wire, taken from the IP total_length header field).
|
||||||
## Only set if :bro:id:`use_conn_size_analyzer` = T
|
## Only set if :bro:id:`use_conn_size_analyzer` = T.
|
||||||
resp_ip_bytes: count &log &optional;
|
resp_ip_bytes: count &log &optional;
|
||||||
## If this connection was over a tunnel, indicate the
|
## If this connection was over a tunnel, indicate the
|
||||||
## *uid* values for any encapsulating parent connections
|
## *uid* values for any encapsulating parent connections
|
||||||
|
|
|
@ -11,10 +11,11 @@ export {
|
||||||
## c: The connection to watch.
|
## c: The connection to watch.
|
||||||
##
|
##
|
||||||
## callback: A callback function that takes as arguments the monitored
|
## callback: A callback function that takes as arguments the monitored
|
||||||
## *connection*, and counter *cnt* that increments each time the
|
## *connection*, and counter *cnt* that increments each time
|
||||||
## callback is called. It returns an interval indicating how long
|
## the callback is called. It returns an interval indicating
|
||||||
## in the future to schedule an event which will call the
|
## how long in the future to schedule an event which will call
|
||||||
## callback. A negative return interval causes polling to stop.
|
## the callback. A negative return interval causes polling
|
||||||
|
## to stop.
|
||||||
##
|
##
|
||||||
## cnt: The initial value of a counter which gets passed to *callback*.
|
## cnt: The initial value of a counter which gets passed to *callback*.
|
||||||
##
|
##
|
||||||
|
|
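A sketch of such a callback (the function name is hypothetical):

.. code:: bro

    function poll_cb(c: connection, cnt: count): interval
        {
        print fmt("poll #%d of %s", cnt, c$uid);
        # A negative return interval stops the polling.
        return cnt < 5 ? 2secs : -1sec;
        }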
1
scripts/base/protocols/dhcp/README
Normal file
|
@ -0,0 +1 @@
|
||||||
|
Support for Dynamic Host Configuration Protocol (DHCP) analysis.
|
|
@ -5,7 +5,7 @@ module DHCP;
|
||||||
|
|
||||||
export {
|
export {
|
||||||
|
|
||||||
## Types of DHCP messages. See RFC 1533.
|
## Types of DHCP messages. See :rfc:`1533`.
|
||||||
const message_types = {
|
const message_types = {
|
||||||
[1] = "DHCP_DISCOVER",
|
[1] = "DHCP_DISCOVER",
|
||||||
[2] = "DHCP_OFFER",
|
[2] = "DHCP_OFFER",
|
||||||
|
|
|
@ -3,7 +3,8 @@
|
||||||
##! noisy on most networks, and focuses on the end-result: assigned leases.
|
##! noisy on most networks, and focuses on the end-result: assigned leases.
|
||||||
##!
|
##!
|
||||||
##! If you'd like to track known DHCP devices and to log the hostname
|
##! If you'd like to track known DHCP devices and to log the hostname
|
||||||
##! supplied by the client, see policy/protocols/dhcp/known-devices.bro
|
##! supplied by the client, see
|
||||||
|
##! :doc:`/scripts/policy/protocols/dhcp/known-devices-and-hostnames`.
|
||||||
|
|
||||||
@load ./utils.bro
|
@load ./utils.bro
|
||||||
|
|
||||||
|
@ -18,7 +19,7 @@ export {
|
||||||
## associated connection is observed.
|
## associated connection is observed.
|
||||||
ts: time &log;
|
ts: time &log;
|
||||||
## A unique identifier of the connection over which DHCP is
|
## A unique identifier of the connection over which DHCP is
|
||||||
## occuring.
|
## occurring.
|
||||||
uid: string &log;
|
uid: string &log;
|
||||||
## The connection's 4-tuple of endpoint addresses/ports.
|
## The connection's 4-tuple of endpoint addresses/ports.
|
||||||
id: conn_id &log;
|
id: conn_id &log;
|
||||||
|
@ -28,7 +29,7 @@ export {
|
||||||
assigned_ip: addr &log &optional;
|
assigned_ip: addr &log &optional;
|
||||||
## IP address lease interval.
|
## IP address lease interval.
|
||||||
lease_time: interval &log &optional;
|
lease_time: interval &log &optional;
|
||||||
## A random number choosen by the client for this transaction.
|
## A random number chosen by the client for this transaction.
|
||||||
trans_id: count &log;
|
trans_id: count &log;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -37,7 +38,7 @@ export {
|
||||||
global log_dhcp: event(rec: Info);
|
global log_dhcp: event(rec: Info);
|
||||||
}
|
}
|
||||||
|
|
||||||
# Add the dhcp info to the connection record
|
# Add the dhcp info to the connection record.
|
||||||
redef record connection += {
|
redef record connection += {
|
||||||
dhcp: Info &optional;
|
dhcp: Info &optional;
|
||||||
};
|
};
|
||||||
|
|
|
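A minimal sketch of consuming these records via the log event:

.. code:: bro

    event DHCP::log_dhcp(rec: DHCP::Info)
        {
        if ( rec?$assigned_ip && rec?$lease_time )
            print fmt("%s leased for %s (transaction %d)",
                      rec$assigned_ip, rec$lease_time, rec$trans_id);
        }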
@ -3,11 +3,11 @@
|
||||||
module DHCP;
|
module DHCP;
|
||||||
|
|
||||||
export {
|
export {
|
||||||
## Reverse the octets of an IPv4 IP.
|
## Reverse the octets of an IPv4 address.
|
||||||
##
|
##
|
||||||
## ip: An :bro:type:`addr` IPv4 address.
|
## ip: An IPv4 address.
|
||||||
##
|
##
|
||||||
## Returns: A reversed addr.
|
## Returns: A reversed IPv4 address.
|
||||||
global reverse_ip: function(ip: addr): addr;
|
global reverse_ip: function(ip: addr): addr;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
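For example:

.. code:: bro

    event bro_init()
        {
        # Prints 4.3.2.1, e.g. for constructing in-addr.arpa lookups.
        print DHCP::reverse_ip(1.2.3.4);
        }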
1
scripts/base/protocols/dnp3/README
Normal file
|
@ -0,0 +1 @@
|
||||||
|
Support for Distributed Network Protocol (DNP3) analysis.
|
|
@ -10,7 +10,7 @@ export {
|
||||||
type Info: record {
|
type Info: record {
|
||||||
## Time of the request.
|
## Time of the request.
|
||||||
ts: time &log;
|
ts: time &log;
|
||||||
## Unique identifier for the connnection.
|
## Unique identifier for the connection.
|
||||||
uid: string &log;
|
uid: string &log;
|
||||||
## Identifier for the connection.
|
## Identifier for the connection.
|
||||||
id: conn_id &log;
|
id: conn_id &log;
|
||||||
|
|
1
scripts/base/protocols/dns/README
Normal file
|
@ -0,0 +1 @@
|
||||||
|
Support for Domain Name System (DNS) protocol analysis.
|
|
@ -8,7 +8,8 @@ export {
|
||||||
const EDNS = 41; ##< An OPT RR TYPE value described by EDNS.
|
const EDNS = 41; ##< An OPT RR TYPE value described by EDNS.
|
||||||
const ANY = 255; ##< A QTYPE value describing a request for all records.
|
const ANY = 255; ##< A QTYPE value describing a request for all records.
|
||||||
|
|
||||||
## Mapping of DNS query type codes to human readable string representation.
|
## Mapping of DNS query type codes to human readable string
|
||||||
|
## representation.
|
||||||
const query_types = {
|
const query_types = {
|
||||||
[1] = "A", [2] = "NS", [3] = "MD", [4] = "MF",
|
[1] = "A", [2] = "NS", [3] = "MD", [4] = "MF",
|
||||||
[5] = "CNAME", [6] = "SOA", [7] = "MB", [8] = "MG",
|
[5] = "CNAME", [6] = "SOA", [7] = "MB", [8] = "MG",
|
||||||
|
@ -64,8 +65,8 @@ export {
|
||||||
[32768] = "DNS_SEC_OK", # accepts DNS Sec RRs
|
[32768] = "DNS_SEC_OK", # accepts DNS Sec RRs
|
||||||
} &default="?";
|
} &default="?";
|
||||||
|
|
||||||
## Possible values of the CLASS field in resource records or QCLASS field
|
## Possible values of the CLASS field in resource records or QCLASS
|
||||||
## in query messages.
|
## field in query messages.
|
||||||
const classes = {
|
const classes = {
|
||||||
[1] = "C_INTERNET",
|
[1] = "C_INTERNET",
|
||||||
[2] = "C_CSNET",
|
[2] = "C_CSNET",
|
||||||
|
|
|
@ -22,8 +22,8 @@ export {
|
||||||
id: conn_id &log;
|
id: conn_id &log;
|
||||||
## The transport layer protocol of the connection.
|
## The transport layer protocol of the connection.
|
||||||
proto: transport_proto &log;
|
proto: transport_proto &log;
|
||||||
## A 16 bit identifier assigned by the program that generated the
|
## A 16-bit identifier assigned by the program that generated
|
||||||
## DNS query. Also used in responses to match up replies to
|
## the DNS query. Also used in responses to match up replies to
|
||||||
## outstanding queries.
|
## outstanding queries.
|
||||||
trans_id: count &log &optional;
|
trans_id: count &log &optional;
|
||||||
## The domain name that is the subject of the DNS query.
|
## The domain name that is the subject of the DNS query.
|
||||||
|
@ -40,17 +40,17 @@ export {
|
||||||
rcode: count &log &optional;
|
rcode: count &log &optional;
|
||||||
## A descriptive name for the response code value.
|
## A descriptive name for the response code value.
|
||||||
rcode_name: string &log &optional;
|
rcode_name: string &log &optional;
|
||||||
## The Authoritative Answer bit for response messages specifies that
|
## The Authoritative Answer bit for response messages specifies
|
||||||
## the responding name server is an authority for the domain name
|
## that the responding name server is an authority for the
|
||||||
## in the question section.
|
## domain name in the question section.
|
||||||
AA: bool &log &default=F;
|
AA: bool &log &default=F;
|
||||||
## The Truncation bit specifies that the message was truncated.
|
## The Truncation bit specifies that the message was truncated.
|
||||||
TC: bool &log &default=F;
|
TC: bool &log &default=F;
|
||||||
## The Recursion Desired bit in a request message indicates that
|
## The Recursion Desired bit in a request message indicates that
|
||||||
## the client wants recursive service for this query.
|
## the client wants recursive service for this query.
|
||||||
RD: bool &log &default=F;
|
RD: bool &log &default=F;
|
||||||
## The Recursion Available bit in a response message indicates that
|
## The Recursion Available bit in a response message indicates
|
||||||
## the name server supports recursive queries.
|
## that the name server supports recursive queries.
|
||||||
RA: bool &log &default=F;
|
RA: bool &log &default=F;
|
||||||
## A reserved field that is currently supposed to be zero in all
|
## A reserved field that is currently supposed to be zero in all
|
||||||
## queries and responses.
|
## queries and responses.
|
||||||
|
@ -58,19 +58,19 @@ export {
|
||||||
## The set of resource descriptions in the query answer.
|
## The set of resource descriptions in the query answer.
|
||||||
answers: vector of string &log &optional;
|
answers: vector of string &log &optional;
|
||||||
## The caching intervals of the associated RRs described by the
|
## The caching intervals of the associated RRs described by the
|
||||||
## ``answers`` field.
|
## *answers* field.
|
||||||
TTLs: vector of interval &log &optional;
|
TTLs: vector of interval &log &optional;
|
||||||
## The DNS query was rejected by the server.
|
## The DNS query was rejected by the server.
|
||||||
rejected: bool &log &default=F;
|
rejected: bool &log &default=F;
|
||||||
|
|
||||||
## This value indicates if this request/response pair is ready to be
|
## This value indicates if this request/response pair is ready
|
||||||
## logged.
|
## to be logged.
|
||||||
ready: bool &default=F;
|
ready: bool &default=F;
|
||||||
## The total number of resource records in a reply message's answer
|
## The total number of resource records in a reply message's
|
||||||
## section.
|
## answer section.
|
||||||
total_answers: count &optional;
|
total_answers: count &optional;
|
||||||
## The total number of resource records in a reply message's answer,
|
## The total number of resource records in a reply message's
|
||||||
## authority, and additional sections.
|
## answer, authority, and additional sections.
|
||||||
total_replies: count &optional;
|
total_replies: count &optional;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -78,9 +78,10 @@ export {
|
||||||
## record as it is sent to the logging framework.
|
## record as it is sent to the logging framework.
|
||||||
global log_dns: event(rec: Info);
|
global log_dns: event(rec: Info);
|
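A minimal sketch of consuming these records via the log event:

.. code:: bro

    event DNS::log_dns(rec: DNS::Info)
        {
        if ( rec$rejected && rec?$query )
            print fmt("server rejected query for %s", rec$query);
        }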
||||||
|
|
||||||
## This is called by the specific dns_*_reply events with a "reply" which
|
## This is called by the specific dns_*_reply events with a "reply"
|
||||||
## may not represent the full data available from the resource record, but
|
## which may not represent the full data available from the resource
|
||||||
## it's generally considered a summarization of the response(s).
|
## record, but it's generally considered a summarization of the
|
||||||
|
## responses.
|
||||||
##
|
##
|
||||||
## c: The connection record for which to fill in DNS reply data.
|
## c: The connection record for which to fill in DNS reply data.
|
||||||
##
|
##
|
||||||
|
@ -95,7 +96,7 @@ export {
|
||||||
## This can be used if additional initialization logic needs to happen
|
## This can be used if additional initialization logic needs to happen
|
||||||
## when creating a new session value.
|
## when creating a new session value.
|
||||||
##
|
##
|
||||||
## c: The connection involved in the new session
|
## c: The connection involved in the new session.
|
||||||
##
|
##
|
||||||
## msg: The DNS message header information.
|
## msg: The DNS message header information.
|
||||||
##
|
##
|
||||||
|
@ -109,9 +110,9 @@ export {
|
||||||
## query/response which haven't completed yet.
|
## query/response which haven't completed yet.
|
||||||
pending: table[count] of Queue::Queue;
|
pending: table[count] of Queue::Queue;
|
||||||
|
|
||||||
## This is the list of DNS responses that have completed based on the
|
## This is the list of DNS responses that have completed based
|
||||||
## number of responses declared and the number received. The contents
|
## on the number of responses declared and the number received.
|
||||||
## of the set are transaction IDs.
|
## The contents of the set are transaction IDs.
|
||||||
finished_answers: set[count];
|
finished_answers: set[count];
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
1
scripts/base/protocols/ftp/README
Normal file
|
@ -0,0 +1 @@
|
||||||
|
Support for File Transfer Protocol (FTP) analysis.
|
|
@ -4,20 +4,20 @@
|
||||||
##! that successfully negotiate the GSSAPI method of an AUTH request
|
##! that successfully negotiate the GSSAPI method of an AUTH request
|
||||||
##! and for which the exchange involved an encoded TLS/SSL handshake,
|
##! and for which the exchange involved an encoded TLS/SSL handshake,
|
||||||
##! indicating the GSI mechanism for GSSAPI was used. This analysis
|
##! indicating the GSI mechanism for GSSAPI was used. This analysis
|
||||||
##! is all supported internally, this script simple adds the "gridftp"
|
##! is all supported internally, this script simply adds the "gridftp"
|
||||||
##! label to the *service* field of the control channel's
|
##! label to the *service* field of the control channel's
|
||||||
##! :bro:type:`connection` record.
|
##! :bro:type:`connection` record.
|
||||||
##!
|
##!
|
||||||
##! GridFTP data channels are identified by a heuristic that relies on
|
##! GridFTP data channels are identified by a heuristic that relies on
|
||||||
##! the fact that default settings for GridFTP clients typically
|
##! the fact that default settings for GridFTP clients typically
|
||||||
##! mutally authenticate the data channel with TLS/SSL and negotiate a
|
##! mutually authenticate the data channel with TLS/SSL and negotiate a
|
||||||
##! NULL bulk cipher (no encryption). Connections with those
|
##! NULL bulk cipher (no encryption). Connections with those
|
||||||
##! attributes are then polled for two minutes with decreasing frequency
|
##! attributes are then polled for two minutes with decreasing frequency
|
||||||
##! to check if the transfer sizes are large enough to indicate a
|
##! to check if the transfer sizes are large enough to indicate a
|
||||||
##! GridFTP data channel that would be undesireable to analyze further
|
##! GridFTP data channel that would be undesirable to analyze further
|
||||||
##! (e.g. stop TCP reassembly). A side effect is that true connection
|
##! (e.g. stop TCP reassembly). A side effect is that true connection
|
||||||
##! sizes are not logged, but at the benefit of saving CPU cycles that
|
##! sizes are not logged, but at the benefit of saving CPU cycles that
|
||||||
##! otherwise go to analyzing the large (and likely benign) connections.
|
##! would otherwise go to analyzing the large (and likely benign) connections.
|
||||||
|
|
||||||
@load ./info
|
@load ./info
|
||||||
@load ./main
|
@load ./main
|
||||||
|
@ -59,8 +59,8 @@ export {
|
||||||
## been exceeded. This is called in a :bro:see:`ssl_established` event
|
## been exceeded. This is called in a :bro:see:`ssl_established` event
|
||||||
## handler and by default looks for both a client and server certificate
|
## handler and by default looks for both a client and server certificate
|
||||||
## and for a NULL bulk cipher. One way in which this function could be
|
## and for a NULL bulk cipher. One way in which this function could be
|
||||||
## redefined is to make it also consider client/server certificate issuer
|
## redefined is to make it also consider client/server certificate
|
||||||
## subjects.
|
## issuer subjects.
|
||||||
##
|
##
|
||||||
## c: The connection which may possibly be a GridFTP data channel.
|
## c: The connection which may possibly be a GridFTP data channel.
|
||||||
##
|
##
|
||||||
|
|
|
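A sketch of the redef suggested above; the SSL field consulted here is an assumption for illustration, not taken from this script:

.. code:: bro

    redef GridFTP::data_channel_initial_criteria = function(c: connection): bool
        {
        # Additionally require a grid-looking certificate issuer.
        return c?$ssl && c$ssl?$issuer_subject &&
               /O=Grid/ in c$ssl$issuer_subject;
        };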
@ -18,7 +18,8 @@ export {
|
||||||
orig_h: addr &log;
|
orig_h: addr &log;
|
||||||
## The host that will be accepting the data connection.
|
## The host that will be accepting the data connection.
|
||||||
resp_h: addr &log;
|
resp_h: addr &log;
|
||||||
## The port at which the acceptor is listening for the data connection.
|
## The port at which the acceptor is listening for the data
|
||||||
|
## connection.
|
||||||
resp_p: port &log;
|
resp_p: port &log;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -38,7 +39,8 @@ export {
|
||||||
## Argument for the command if one is given.
|
## Argument for the command if one is given.
|
||||||
arg: string &log &optional;
|
arg: string &log &optional;
|
||||||
|
|
||||||
## Libmagic "sniffed" file type if the command indicates a file transfer.
|
## Libmagic "sniffed" file type if the command indicates a file
|
||||||
|
## transfer.
|
||||||
mime_type: string &log &optional;
|
mime_type: string &log &optional;
|
||||||
## Size of the file if the command indicates a file transfer.
|
## Size of the file if the command indicates a file transfer.
|
||||||
file_size: count &log &optional;
|
file_size: count &log &optional;
|
||||||
|
@ -59,8 +61,8 @@ export {
|
||||||
|
|
||||||
## Command that is currently waiting for a response.
|
## Command that is currently waiting for a response.
|
||||||
cmdarg: CmdArg &optional;
|
cmdarg: CmdArg &optional;
|
||||||
## Queue for commands that have been sent but not yet responded to
|
## Queue for commands that have been sent but not yet responded
|
||||||
## are tracked here.
|
## to are tracked here.
|
||||||
pending_commands: PendingCmds;
|
pending_commands: PendingCmds;
|
||||||
|
|
||||||
## Indicates if the session is in active or passive mode.
|
## Indicates if the session is in active or passive mode.
|
||||||
|
|
|
@ -26,7 +26,7 @@ export {
|
||||||
const guest_ids = { "anonymous", "ftp", "ftpuser", "guest" } &redef;
|
const guest_ids = { "anonymous", "ftp", "ftpuser", "guest" } &redef;
|
||||||
|
|
||||||
## This record is to hold a parsed FTP reply code. For example, for the
|
## This record is to hold a parsed FTP reply code. For example, for the
|
||||||
## 201 status code, the digits would be parsed as: x->2, y->0, z=>1.
|
## 201 status code, the digits would be parsed as: x->2, y->0, z->1.
|
||||||
type ReplyCode: record {
|
type ReplyCode: record {
|
||||||
x: count;
|
x: count;
|
||||||
y: count;
|
y: count;
|
||||||
|
|
|
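For example, the parsed form of a 201 reply as described above:

.. code:: bro

    event bro_init()
        {
        local rc: FTP::ReplyCode = [$x=2, $y=0, $z=1];
        print rc;
        }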
@ -11,14 +11,14 @@ export {
|
||||||
##
|
##
|
||||||
## rec: An :bro:type:`FTP::Info` record.
|
## rec: An :bro:type:`FTP::Info` record.
|
||||||
##
|
##
|
||||||
## Returns: A URL, not prefixed by "ftp://".
|
## Returns: A URL, not prefixed by ``"ftp://"``.
|
||||||
global build_url: function(rec: Info): string;
|
global build_url: function(rec: Info): string;
|
||||||
|
|
||||||
## Creates a URL from an :bro:type:`FTP::Info` record.
|
## Creates a URL from an :bro:type:`FTP::Info` record.
|
||||||
##
|
##
|
||||||
## rec: An :bro:type:`FTP::Info` record.
|
## rec: An :bro:type:`FTP::Info` record.
|
||||||
##
|
##
|
||||||
## Returns: A URL prefixed with "ftp://".
|
## Returns: A URL prefixed with ``"ftp://"``.
|
||||||
global build_url_ftp: function(rec: Info): string;
|
global build_url_ftp: function(rec: Info): string;
|
||||||
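A sketch of using the URL builders from an event handler, assuming the FTP state this framework attaches to the connection record:

.. code:: bro

    event ftp_reply(c: connection, code: count, msg: string, cont_resp: bool)
        {
        if ( c?$ftp )
            print FTP::build_url_ftp(c$ftp);
        }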
|
|
||||||
## Create an extremely shortened representation of a log line.
|
## Create an extremely shortened representation of a log line.
|
||||||
|
|
1
scripts/base/protocols/http/README
Normal file
|
@ -0,0 +1 @@
|
||||||
|
Support for Hypertext Transfer Protocol (HTTP) analysis.
|
|
@ -28,9 +28,11 @@ export {
|
||||||
|
|
||||||
## The current entity.
|
## The current entity.
|
||||||
current_entity: Entity &optional;
|
current_entity: Entity &optional;
|
||||||
## Current number of MIME entities in the HTTP request message body.
|
## Current number of MIME entities in the HTTP request message
|
||||||
|
## body.
|
||||||
orig_mime_depth: count &default=0;
|
orig_mime_depth: count &default=0;
|
||||||
## Current number of MIME entities in the HTTP response message body.
|
## Current number of MIME entities in the HTTP response message
|
||||||
|
## body.
|
||||||
resp_mime_depth: count &default=0;
|
resp_mime_depth: count &default=0;
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
|
@ -16,7 +16,8 @@ export {
|
||||||
EMPTY
|
EMPTY
|
||||||
};
|
};
|
||||||
|
|
||||||
## This setting changes if passwords used in Basic-Auth are captured or not.
|
## This setting changes if passwords used in Basic-Auth are captured or
|
||||||
|
## not.
|
||||||
const default_capture_password = F &redef;
|
const default_capture_password = F &redef;
|
||||||
|
|
||||||
type Info: record {
|
type Info: record {
|
||||||
|
@ -36,8 +37,8 @@ export {
|
||||||
## URI used in the request.
|
## URI used in the request.
|
||||||
uri: string &log &optional;
|
uri: string &log &optional;
|
||||||
## Value of the "referer" header. The comment is deliberately
|
## Value of the "referer" header. The comment is deliberately
|
||||||
## misspelled like the standard declares, but the name used here is
|
## misspelled like the standard declares, but the name used here
|
||||||
## "referrer" spelled correctly.
|
## is "referrer" spelled correctly.
|
||||||
referrer: string &log &optional;
|
referrer: string &log &optional;
|
||||||
## Value of the User-Agent header from the client.
|
## Value of the User-Agent header from the client.
|
||||||
user_agent: string &log &optional;
|
user_agent: string &log &optional;
|
||||||
|
@ -55,7 +56,8 @@ export {
|
||||||
info_code: count &log &optional;
|
info_code: count &log &optional;
|
||||||
## Last seen 1xx informational reply message returned by the server.
|
## Last seen 1xx informational reply message returned by the server.
|
||||||
info_msg: string &log &optional;
|
info_msg: string &log &optional;
|
||||||
## Filename given in the Content-Disposition header sent by the server.
|
## Filename given in the Content-Disposition header sent by the
|
||||||
|
## server.
|
||||||
filename: string &log &optional;
|
filename: string &log &optional;
|
||||||
## A set of indicators of various attributes discovered and
|
## A set of indicators of various attributes discovered and
|
||||||
## related to a particular request/response pair.
|
## related to a particular request/response pair.
|
||||||
|
|
|
@ -6,8 +6,8 @@
|
||||||
module HTTP;
|
module HTTP;
|
||||||
|
|
||||||
export {
|
export {
|
||||||
## Given a string containing a series of key-value pairs separated by "=",
|
## Given a string containing a series of key-value pairs separated
|
||||||
## this function can be used to parse out all of the key names.
|
## by "=", this function can be used to parse out all of the key names.
|
||||||
##
|
##
|
||||||
## data: The raw data, such as a URL or cookie value.
|
## data: The raw data, such as a URL or cookie value.
|
||||||
##
|
##
|
||||||
|
@ -17,20 +17,20 @@ export {
|
||||||
## Returns: A vector of strings containing the keys.
|
## Returns: A vector of strings containing the keys.
|
||||||
global extract_keys: function(data: string, kv_splitter: pattern): string_vec;
|
global extract_keys: function(data: string, kv_splitter: pattern): string_vec;
|
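For example, on a cookie-style string (data hypothetical):

.. code:: bro

    event bro_init()
        {
        # Prints "a" and then "b".
        local keys = HTTP::extract_keys("a=1; b=2", /; */);
        for ( i in keys )
            print keys[i];
        }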
||||||
|
|
||||||
## Creates a URL from an :bro:type:`HTTP::Info` record. This should handle
|
## Creates a URL from an :bro:type:`HTTP::Info` record. This should
|
||||||
## edge cases such as proxied requests appropriately.
|
## handle edge cases such as proxied requests appropriately.
|
||||||
##
|
##
|
||||||
## rec: An :bro:type:`HTTP::Info` record.
|
## rec: An :bro:type:`HTTP::Info` record.
|
||||||
##
|
##
|
||||||
## Returns: A URL, not prefixed by "http://".
|
## Returns: A URL, not prefixed by ``"http://"``.
|
||||||
global build_url: function(rec: Info): string;
|
global build_url: function(rec: Info): string;
|
||||||
|
|
||||||
## Creates a URL from an :bro:type:`HTTP::Info` record. This should handle
|
## Creates a URL from an :bro:type:`HTTP::Info` record. This should
|
||||||
## edge cases such as proxied requests appropriately.
|
## handle edge cases such as proxied requests appropriately.
|
||||||
##
|
##
|
||||||
## rec: An :bro:type:`HTTP::Info` record.
|
## rec: An :bro:type:`HTTP::Info` record.
|
||||||
##
|
##
|
||||||
## Returns: A URL prefixed with "http://".
|
## Returns: A URL prefixed with ``"http://"``.
|
||||||
global build_url_http: function(rec: Info): string;
|
global build_url_http: function(rec: Info): string;
|
||||||
|
|
||||||
## Create an extremely shortened representation of a log line.
|
## Create an extremely shortened representation of a log line.
|
||||||
|
|
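A usage sketch for the two helpers documented above, assuming the standard
``HTTP::log_http`` log event from the accompanying main.bro:

.. code:: bro

    @load base/protocols/http

    event HTTP::log_http(rec: HTTP::Info)
        {
        # Rebuild the full URL, prefixed with "http://".
        print HTTP::build_url_http(rec);

        # Pull the key names out of a query string such as "a=1&b=2".
        if ( rec?$uri )
            print HTTP::extract_keys(rec$uri, /&/);
        }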
scripts/base/protocols/irc/README (new file)
@@ -0,0 +1 @@
+Support for Internet Relay Chat (IRC) protocol analysis.

scripts/base/protocols/modbus/README (new file)
@@ -0,0 +1 @@
+Support for Modbus protocol analysis.
scripts/base/protocols/modbus/main.bro
@@ -10,7 +10,7 @@ export {
     type Info: record {
         ## Time of the request.
         ts: time &log;
-        ## Unique identifier for the connnection.
+        ## Unique identifier for the connection.
         uid: string &log;
         ## Identifier for the connection.
         id: conn_id &log;
@@ -20,8 +20,8 @@ export {
         exception: string &log &optional;
     };

-    ## Event that can be handled to access the Modbus record as it is sent on
-    ## to the logging framework.
+    ## Event that can be handled to access the Modbus record as it is sent
+    ## on to the logging framework.
     global log_modbus: event(rec: Info);
 }

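The ``log_modbus`` event fires once per logged record, so a handler can
inspect each transaction; a minimal sketch:

.. code:: bro

    @load base/protocols/modbus

    event Modbus::log_modbus(rec: Modbus::Info)
        {
        # Flag any transaction that carried an exception code.
        if ( rec?$exception )
            print fmt("Modbus exception %s on connection %s",
                      rec$exception, rec$uid);
        }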
scripts/base/protocols/pop3/README (new file)
@@ -0,0 +1 @@
+Support for POP3 (Post Office Protocol) protocol analysis.

scripts/base/protocols/smtp/README (new file)
@@ -0,0 +1 @@
+Support for Simple Mail Transfer Protocol (SMTP) analysis.
scripts/base/protocols/smtp/main.bro
@@ -14,8 +14,8 @@ export {
         uid: string &log;
         ## The connection's 4-tuple of endpoint addresses/ports.
         id: conn_id &log;
-        ## A count to represent the depth of this message transaction in a single
-        ## connection where multiple messages were transferred.
+        ## A count to represent the depth of this message transaction in
+        ## a single connection where multiple messages were transferred.
         trans_depth: count &log;
         ## Contents of the Helo header.
         helo: string &log &optional;
@@ -37,7 +37,7 @@ export {
         in_reply_to: string &log &optional;
         ## Contents of the Subject header.
         subject: string &log &optional;
-        ## Contents of the X-Origininating-IP header.
+        ## Contents of the X-Originating-IP header.
         x_originating_ip: addr &log &optional;
         ## Contents of the first Received header.
         first_received: string &log &optional;
@@ -50,7 +50,8 @@ export {
         ## Value of the User-Agent header from the client.
         user_agent: string &log &optional;

-        ## Indicates if the "Received: from" headers should still be processed.
+        ## Indicates if the "Received: from" headers should still be
+        ## processed.
         process_received_from: bool &default=T;
         ## Indicates if client activity has been seen, but not yet logged.
         has_client_activity: bool &default=F;
@@ -58,9 +59,9 @@ export {

     type State: record {
         helo: string &optional;
-        ## Count the number of individual messages transmitted during this
-        ## SMTP session. Note, this is not the number of recipients, but the
-        ## number of message bodies transferred.
+        ## Count the number of individual messages transmitted during
+        ## this SMTP session. Note, this is not the number of
+        ## recipients, but the number of message bodies transferred.
         messages_transferred: count &default=0;

         pending_messages: set[Info] &optional;
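A handler sketch for these fields, assuming the standard ``SMTP::log_smtp``
log event from this script:

.. code:: bro

    @load base/protocols/smtp

    event SMTP::log_smtp(rec: SMTP::Info)
        {
        # X-Originating-IP is optional, so test for it before use.
        if ( rec?$x_originating_ip )
            print fmt("message %d of %s claims origin %s",
                      rec$trans_depth, rec$uid, rec$x_originating_ip);
        }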
scripts/base/protocols/socks/README (new file)
@@ -0,0 +1 @@
+Support for Socket Secure (SOCKS) protocol analysis.
scripts/base/protocols/socks/main.bro
@@ -9,17 +9,19 @@ export {
     type Info: record {
         ## Time when the proxy connection was first detected.
         ts: time &log;
-        ## Unique ID for the tunnel - may correspond to connection uid or be non-existent.
+        ## Unique ID for the tunnel - may correspond to connection uid
+        ## or be non-existent.
         uid: string &log;
         ## The connection's 4-tuple of endpoint addresses/ports.
         id: conn_id &log;
         ## Protocol version of SOCKS.
         version: count &log;
-        ## Username for the proxy if extracted from the network..
+        ## Username for the proxy if extracted from the network.
         user: string &log &optional;
         ## Server status for the attempt at using the proxy.
         status: string &log &optional;
-        ## Client requested SOCKS address. Could be an address, a name or both.
+        ## Client requested SOCKS address. Could be an address, a name
+        ## or both.
         request: SOCKS::Address &log &optional;
         ## Client requested port.
         request_p: port &log &optional;
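A corresponding handler sketch, assuming this script's ``SOCKS::log_socks``
log event:

.. code:: bro

    @load base/protocols/socks

    event SOCKS::log_socks(rec: SOCKS::Info)
        {
        # The requested address may be a name, an address, or both.
        if ( rec?$request && rec?$request_p )
            print fmt("SOCKSv%d request for %s on %s",
                      rec$version, rec$request, rec$request_p);
        }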
scripts/base/protocols/ssh/README (new file)
@@ -0,0 +1 @@
+Support for Secure Shell (SSH) protocol analysis.
scripts/base/protocols/ssh/main.bro
@@ -25,8 +25,8 @@ export {
         uid: string &log;
         ## The connection's 4-tuple of endpoint addresses/ports.
         id: conn_id &log;
-        ## Indicates if the login was heuristically guessed to be "success",
-        ## "failure", or "undetermined".
+        ## Indicates if the login was heuristically guessed to be
+        ## "success", "failure", or "undetermined".
         status: string &log &default="undetermined";
         ## Direction of the connection. If the client was a local host
         ## logging into an external host, this would be OUTBOUND. INBOUND
@@ -39,8 +39,8 @@ export {
         server: string &log &optional;
         ## Amount of data returned from the server. This is currently
         ## the only measure of the success heuristic and it is logged to
-        ## assist analysts looking at the logs to make their own determination
-        ## about the success on a case-by-case basis.
+        ## assist analysts looking at the logs to make their own
+        ## determination about the success on a case-by-case basis.
         resp_size: count &log &default=0;

         ## Indicate if the SSH session is done being watched.
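Because ``status`` is only a heuristic, ``resp_size`` is logged so analysts
can second-guess it; a sketch, assuming this script's ``SSH::log_ssh`` log
event:

.. code:: bro

    @load base/protocols/ssh

    event SSH::log_ssh(rec: SSH::Info)
        {
        # Review "success" verdicts that rest on very little returned data.
        if ( rec$status == "success" && rec$resp_size < 1024 )
            print fmt("weak success heuristic for %s (%d bytes)",
                      rec$uid, rec$resp_size);
        }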
scripts/base/protocols/ssl/README (new file)
@@ -0,0 +1 @@
+Support for Secure Sockets Layer (SSL) protocol analysis.
scripts/base/protocols/ssl/consts.bro
@@ -23,7 +23,7 @@ export {
     } &default=function(i: count):string { return fmt("unknown-%d", i); };

     ## Mapping between numeric codes and human readable strings for alert
-    ## descriptions..
+    ## descriptions.
     const alert_descriptions: table[count] of string = {
         [0] = "close_notify",
         [10] = "unexpected_message",
@@ -453,8 +453,8 @@ export {
     const TLS_EMPTY_RENEGOTIATION_INFO_SCSV = 0x00FF;

     ## This is a table of all known cipher specs. It can be used for
-    ## detecting unknown ciphers and for converting the cipher spec constants
-    ## into a human readable format.
+    ## detecting unknown ciphers and for converting the cipher spec
+    ## constants into a human readable format.
     const cipher_desc: table[count] of string = {
         [SSLv20_CK_RC4_128_EXPORT40_WITH_MD5] =
             "SSLv20_CK_RC4_128_EXPORT40_WITH_MD5",
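Since ``cipher_desc`` is keyed by the numeric cipher constants, unknown
values should be guarded with an ``in`` test; a minimal sketch (the 0x000a
value is a hypothetical example):

.. code:: bro

    @load base/protocols/ssl

    event bro_init()
        {
        local code = 0x000a;  # hypothetical cipher spec value

        if ( code in SSL::cipher_desc )
            print SSL::cipher_desc[code];
        else
            print fmt("unknown cipher spec %d", code);
        }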
scripts/base/protocols/ssl/main.bro
@@ -26,7 +26,8 @@ export {
         session_id: string &log &optional;
         ## Subject of the X.509 certificate offered by the server.
         subject: string &log &optional;
-        ## Subject of the signer of the X.509 certificate offered by the server.
+        ## Subject of the signer of the X.509 certificate offered by the
+        ## server.
         issuer_subject: string &log &optional;
         ## NotValidBefore field value from the server certificate.
         not_valid_before: time &log &optional;
@@ -37,7 +38,8 @@ export {

         ## Subject of the X.509 certificate offered by the client.
         client_subject: string &log &optional;
-        ## Subject of the signer of the X.509 certificate offered by the client.
+        ## Subject of the signer of the X.509 certificate offered by the
+        ## client.
         client_issuer_subject: string &log &optional;

         ## Full binary server certificate stored in DER format.
@@ -58,8 +60,8 @@ export {
         analyzer_id: count &optional;
     };

-    ## The default root CA bundle. By loading the
-    ## mozilla-ca-list.bro script it will be set to Mozilla's root CA list.
+    ## The default root CA bundle. By default, the mozilla-ca-list.bro
+    ## script sets this to Mozilla's root CA list.
     const root_certs: table[string] of string = {} &redef;

     ## If true, detach the SSL analyzer from the connection to prevent
@@ -67,8 +69,8 @@ export {
     ## (especially with large file transfers).
     const disable_analyzer_after_detection = T &redef;

-    ## Delays an SSL record for a specific token: the record will not be logged
-    ## as longs the token exists or until 15 seconds elapses.
+    ## Delays an SSL record for a specific token: the record will not be
+    ## logged as long as the token exists or until 15 seconds elapses.
     global delay_log: function(info: Info, token: string);

     ## Undelays an SSL record for a previously inserted token, allowing the
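A sketch of the delay mechanism described above; ``SSL::undelay_log`` is
the matching release function referenced in the last comment, and the
token string is arbitrary:

.. code:: bro

    @load base/protocols/ssl

    event ssl_established(c: connection)
        {
        # Hold this record out of ssl.log while extra data is attached;
        # it is released by undelay_log() or after the 15 second timeout.
        SSL::delay_log(c$ssl, "my-lookup-token");
        }

    event connection_state_remove(c: connection)
        {
        if ( c?$ssl )
            SSL::undelay_log(c$ssl, "my-lookup-token");
        }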