Merge remote-tracking branch 'origin/master' into topic/seth/file-entropy

# Conflicts:
#	scripts/test-all-policy.bro
#	testing/btest/Baseline/coverage.default-load-baseline/canonified_loaded_scripts.log
Seth Hall 2016-03-21 11:39:15 -04:00
commit 89b4d79f93
1081 changed files with 38403 additions and 11012 deletions

.gitmodules

@ -22,3 +22,6 @@
[submodule "aux/plugins"]
path = aux/plugins
url = git://git.bro.org/bro-plugins
[submodule "aux/broker"]
path = aux/broker
url = git://git.bro.org/broker

CHANGES

File diff suppressed because it is too large.

CMakeLists.txt

@ -61,7 +61,7 @@ if (NOT SED_EXE)
endif ()
endif ()
FindRequiredPackage(Perl)
FindRequiredPackage(PythonInterp)
FindRequiredPackage(FLEX)
FindRequiredPackage(BISON)
FindRequiredPackage(PCAP)
@ -88,7 +88,7 @@ endif ()
include_directories(BEFORE
${PCAP_INCLUDE_DIR}
${OpenSSL_INCLUDE_DIR}
${OPENSSL_INCLUDE_DIR}
${BIND_INCLUDE_DIR}
${BinPAC_INCLUDE_DIR}
${ZLIB_INCLUDE_DIR}
@ -113,7 +113,7 @@ if (NOT DISABLE_PERFTOOLS)
find_package(GooglePerftools)
endif ()
if (GOOGLEPERFTOOLS_FOUND)
if (GOOGLEPERFTOOLS_FOUND OR TCMALLOC_FOUND)
set(HAVE_PERFTOOLS true)
# Non-Linux systems may not be well-supported by gperftools, so
# require explicit request from user to enable it in that case.
@ -141,7 +141,7 @@ endif ()
set(brodeps
${BinPAC_LIBRARY}
${PCAP_LIBRARY}
${OpenSSL_LIBRARIES}
${OPENSSL_LIBRARIES}
${BIND_LIBRARY}
${ZLIB_LIBRARY}
${JEMALLOC_LIBRARIES}
@ -165,18 +165,26 @@ include(PCAPTests)
include(OpenSSLTests)
include(CheckNameserCompat)
include(GetArchitecture)
include(RequireCXX11)
# Tell the plugin code that we're building as part of the main tree.
set(BRO_PLUGIN_INTERNAL_BUILD true CACHE INTERNAL "" FORCE)
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/config.h.in
${CMAKE_CURRENT_BINARY_DIR}/config.h)
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/bro-config.h.in
${CMAKE_CURRENT_BINARY_DIR}/bro-config.h)
include_directories(${CMAKE_CURRENT_BINARY_DIR})
########################################################################
## Recurse on sub-directories
if ( ENABLE_BROKER )
add_subdirectory(aux/broker)
set(brodeps ${brodeps} broker)
add_definitions(-DENABLE_BROKER)
include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR}/aux/broker)
endif ()
add_subdirectory(src)
add_subdirectory(scripts)
add_subdirectory(doc)
@ -224,6 +232,8 @@ message(
"\nCXXFLAGS: ${CMAKE_CXX_FLAGS} ${CMAKE_CXX_FLAGS_${BuildType}}"
"\nCPP: ${CMAKE_CXX_COMPILER}"
"\n"
"\nBroker: ${ENABLE_BROKER}"
"\nBroker Python: ${BROKER_PYTHON_BINDINGS}"
"\nBroccoli: ${INSTALL_BROCCOLI}"
"\nBroctl: ${INSTALL_BROCTL}"
"\nAux. Tools: ${INSTALL_AUX_TOOLS}"

COPYING

@ -1,4 +1,4 @@
Copyright (c) 1995-2013, The Regents of the University of California
Copyright (c) 1995-2015, The Regents of the University of California
through the Lawrence Berkeley National Laboratory and the
International Computer Science Institute. All rights reserved.

INSTALL

@ -1,3 +0,0 @@
See doc/install/install.rst for installation instructions.

INSTALL Symbolic link

@ -0,0 +1 @@
doc/install/install.rst

Makefile

@ -51,13 +51,15 @@ distclean:
$(MAKE) -C testing $@
test:
@( cd testing && make )
-@( cd testing && make )
test-all: test
test -d aux/broctl && ( cd aux/broctl && make test-all )
test -d aux/btest && ( cd aux/btest && make test )
test -d aux/bro-aux && ( cd aux/bro-aux && make test )
test -d aux/plugins && ( cd aux/plugins && make test-all )
test-aux:
-test -d aux/broctl && ( cd aux/broctl && make test-all )
-test -d aux/btest && ( cd aux/btest && make test )
-test -d aux/bro-aux && ( cd aux/bro-aux && make test )
-test -d aux/plugins && ( cd aux/plugins && make test-all )
test-all: test test-aux
configured:
@test -d $(BUILD) || ( echo "Error: No build/ directory found. Did you run configure?" && exit 1 )

NEWS

@ -4,11 +4,100 @@ release. For an exhaustive list of changes, see the ``CHANGES`` file
(note that submodules, such as BroControl and Broccoli, come with
their own ``CHANGES``.)
Bro 2.4 (in progress)
Bro 2.5 (in progress)
=====================
Dependencies
------------
New Dependencies
----------------
- Bro now requires a compiler with C++11 support for building the
source code.
- Bro now requires the C++ Actor Framework, CAF, which must be
installed first. See http://actor-framework.org.
- Bro now requires Python instead of Perl to compile the source code.
- The pcap buffer size can be set through the new option Pcap::bufsize.
New Functionality
-----------------
- Bro now includes the NetControl framework. The framework allows Bro
to interact easily with hardware and software switches, firewalls, etc.
- Bro now supports the Radiotap header for 802.11 frames.
- Bro now tracks VLAN IDs. To record them inside the connection log,
load protocols/conn/vlan-logging.bro.
- A new per-packet event raw_packet() provides access to layer 2
information. Use with care; generating an event per packet is
expensive.
- A new built-in function, decode_base64_conn(), for Base64 decoding.
It works like decode_base64() but receives an additional connection
argument that is used to report decoding errors to weird.log
(instead of reporter.log).
- Two new built-in functions for handling set[subnet] and table[subnet]:
- check_subnet(subnet, table) checks if a specific subnet is a member
of a set/table. This is different from the "in" operator, which always
performs a longest prefix match.
- matching_subnets(subnet, table) returns all subnets of the set or table
that contain the given subnet.
- Several built-in functions for handling IP addresses and subnets
were added (see the sketch after this list):
- is_v4_subnet(subnet) checks whether a subnet specification is IPv4.
- is_v6_subnet(subnet) checks whether a subnet specification is IPv6.
- addr_to_subnet(addr) converts an IP address to a /32 subnet.
- subnet_to_addr(subnet) returns the IP address part of a subnet.
- subnet_width(subnet) returns the width of a subnet.
- The IRC analyzer now recognizes StartTLS sessions and enables the SSL
analyzer for them.
- New Bro plugins in aux/plugins:
- af_packet: Native AF_PACKET support.
- kafka : Log writer interfacing to Kafka.
- myricom: Native Myricom SNF v3 support.
- pf_ring: Native PF_RING support.
- redis: An experimental log writer for Redis.
- tcprs: A TCP-level analyzer detecting retransmissions, reordering, and more.
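
The new subnet functions might be exercised like this (a sketch with
hypothetical values; the function names are from the list above)::

    event bro_init()
        {
        local nets: set[subnet] = { 192.168.0.0/16, 10.0.0.0/8 };

        print check_subnet(192.168.0.0/16, nets);      # T, an exact member
        print matching_subnets(192.168.1.0/24, nets);  # subnets containing it
        print addr_to_subnet(192.168.1.1);             # 192.168.1.1/32
        print subnet_to_addr(192.168.0.0/16);          # 192.168.0.0
        print subnet_width(192.168.0.0/16);            # 16
        }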
Changed Functionality
---------------------
- ``SSH::skip_processing_after_detection`` was removed. The functionality was
replaced by ``SSH::disable_analyzer_after_detection``.
- Some script-level identifiers have changed their names:
snaplen -> Pcap::snaplen
precompile_pcap_filter() -> Pcap::precompile_pcap_filter()
install_pcap_filter() -> Pcap::install_pcap_filter()
pcap_error() -> Pcap::pcap_error()
Deprecated Functionality
------------------------
- The built-in functions decode_base64_custom() and
encode_base64_custom() are no longer needed and will be removed
in the future. Their functionality is now provided directly by
decode_base64() and encode_base64(), which take an optional
parameter to change the Base64 alphabet.
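
For example, a minimal sketch of the replacement (the optional second
argument, not used here, would supply the alternate alphabet)::

    event bro_init()
        {
        print decode_base64("YnJv");   # prints "bro", standard alphabet
        }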
Bro 2.4
=======
New Functionality
-----------------
@ -16,20 +105,51 @@ New Functionality
- Bro now has support for external plugins that can extend its core
functionality, like protocol/file analysis, via shared libraries.
Plugins can be developed and distributed externally, and will be
pulled in dynamically at startup. Currently, a plugin can provide
custom protocol analyzers, file analyzers, log writers[TODO], input
readers[TODO], packet sources[TODO], and new built-in functions. A
plugin can furthermore hook into Bro's processing a number of places
to add custom logic.
pulled in dynamically at startup (the environment variables
BRO_PLUGIN_PATH and BRO_PLUGIN_ACTIVATE can be used to specify the
locations and names of plugins to activate). Currently, a plugin
can provide custom protocol analyzers, file analyzers, log writers,
input readers, packet sources and dumpers, and new built-in functions.
A plugin can furthermore hook into Bro's processing at a number of
places to add custom logic.
See https://www.bro.org/sphinx-git/devel/plugins.html for more
information on writing plugins.
- Bro now has supoprt for the MySQL wire protocol. Activity gets
- Bro now has support for the MySQL wire protocol. Activity gets
logged into mysql.log.
- Bro now parses DTLS traffic. Activity gets logged into ssl.log.
- Bro now has support for the Kerberos KRB5 protocol over TCP and
UDP. Activity gets logged into kerberos.log.
- Bro now has an RDP analyzer. Activity gets logged into rdp.log.
- Bro now has a file analyzer for Portable Executables. Activity gets
logged into pe.log.
- Bro now has support for the SIP protocol over UDP. Activity gets
logged into sip.log.
- Bro now features a completely rewritten, enhanced SSH analyzer. The
new analyzer is able to determine if logins failed or succeeded in
most circumstances, logs a lot more information about SSH
sessions, supports v1, and introduces the intelligence type
``Intel::PUBKEY_HASH`` and location ``SSH::IN_SERVER_HOST_KEY``. The
analyzer also generates a set of additional events
(``ssh_auth_successful``, ``ssh_auth_failed``, ``ssh_capabilities``,
``ssh2_server_host_key``, ``ssh1_server_host_key``,
``ssh_encrypted_packet``, ``ssh2_dh_server_params``,
``ssh2_gss_error``, ``ssh2_ecc_key``). See next section for
incompatible SSH changes.
- Bro's file analysis now supports reassembly of files that are not
transferred/seen sequentially.
transferred/seen sequentially. The default file reassembly buffer
size is set with the ``Files::reassembly_buffer_size`` variable.
- Bro's file type identification has been greatly improved (new file types,
bug fixes, and performance improvements).
- Bro's scripting language now has a ``while`` statement::
@ -39,6 +159,70 @@ New Functionality
``next`` and ``break`` can be used inside the loop's body just like
with ``for`` loops.
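
A minimal example::

    local i = 0;

    while ( i < 5 )
        print ++i;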
- Bro now integrates Broker, a new communication library. See
aux/broker/README for more information on Broker, and
doc/frameworks/broker.rst for the corresponding Bro script API.
With Broker, Bro has similar capabilities for exchanging events and
logs with remote peers (either another Bro process or some other
application that uses Broker). It also includes a key-value store
API that can be used to share state between peers and optionally
allow data to persist on disk for longer-term storage.
Broker support is off by default for now; it can be enabled at
configure time with --enable-broker. It requires CAF version 0.13+
(https://github.com/actor-framework/actor-framework) as well as a
C++11 compiler (e.g. GCC 4.8+ or Clang 3.3+).
Broker will become a mandatory dependency in future Bro versions and
replace the current communication and serialization system.
- Add --enable-c++11 configure flag to compile Bro's source code in
C++11 mode with a corresponding compiler. Note that 2.4 will be the
last version of Bro that compiles without C++11 support.
- The SSL analysis now alerts when encountering SSL connections with
old protocol versions or unsafe cipher suites. It also gained
extended reporting of weak keys, caching of already validated
certificates, and full support for TLS record defragmentation. SSL generally
became much more robust and added several fields to ssl.log (while
removing some others).
- A new icmp_sent_payload event provides access to ICMP payload.
- The input framework's raw reader now supports seeking by adding an
option "offset" to the config map. Positive offsets are interpreted
to be from the beginning of the file, negative from the end of the
file (-1 is end of file).
- One can now raise events when a connection crosses a given size
threshold in terms of packets or bytes. The primary API for that
functionality is in base/protocols/conn/thresholds.bro (see the sketch
after this list).
- There is a new command-line option -Q/--time that prints Bro's execution
time and memory usage to stderr.
- BroControl now has a new command "deploy" which is equivalent to running
the "check", "install", "stop", and "start" commands (in that order).
- BroControl now has a new option "StatusCmdShowAll" that controls whether
or not the broctl "status" command gathers all of the status information.
This option can be used to make the "status" command run significantly
faster (in this case, the "Peers" column will not be shown in the output).
- BroControl now has a new option "StatsLogEnable" that controls whether
or not broctl will record information to the "stats.log" file. This option
can be used to make the "broctl cron" command run slightly faster (in this
case, "broctl cron" will also no longer send email about not seeing any
packets on the monitoring interfaces).
- BroControl now has a new option "MailHostUpDown" which controls whether or
not the "broctl cron" command will send email when it notices that a host
in the cluster is up or down.
- BroControl now has a new option "CommandTimeout" which specifies the number
of seconds to wait for a command that broctl ran to return results.
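
A sketch of the connection-threshold API mentioned above. The
ConnThreshold names are assumptions based on
base/protocols/conn/thresholds.bro and should be checked against the
installed scripts::

    event connection_established(c: connection)
        {
        # Assumed helper from thresholds.bro: ask for an event once the
        # originator has sent 1 MB.
        ConnThreshold::set_bytes_threshold(c, 1048576, T);
        }

    event ConnThreshold::bytes_threshold_crossed(c: connection, threshold: count, is_orig: bool)
        {
        print fmt("%s crossed %d bytes", c$id$orig_h, threshold);
        }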
Changed Functionality
---------------------
@ -47,9 +231,17 @@ Changed Functionality
- File analysis
* Removed ``fa_file`` record's ``mime_type`` and ``mime_types``
fields. The events ``file_mime_type`` and ``file_mime_types``
have been added which contain the same information. The
``mime_type`` field of ``Files::Info`` also still has this info.
fields. The event ``file_sniff`` has been added which provides
the same information. The ``mime_type`` field of ``Files::Info``
also still has this info.
* The earliest point that new mime type information is available is
in the ``file_sniff`` event which comes after the ``file_new`` and
``file_over_new_connection`` events. Scripts which inspected mime
type info within those events will need to be adapted. (Note: for
users that worked w/ versions of Bro from git, for a while there was
also an event called ``file_mime_type`` which is now replaced with
the ``file_sniff`` event).
* Removed ``Files::add_analyzers_for_mime_type`` function.
@ -58,15 +250,83 @@ Changed Functionality
reassembly for non-sequential files, "offset" can be obtained
with other information already available -- adding together
``seen_bytes`` and ``missed_bytes`` fields of the ``fa_file``
record gives the how many bytes have been written so far (i.e.
record gives how many bytes have been written so far (i.e.
the "offset").
- has_valid_octets: now uses a string_vec parameter instead of
- The SSH changes come with a few incompatibilities. The following
events have been renamed:
* ``SSH::heuristic_failed_login`` to ``ssh_auth_failed``
* ``SSH::heuristic_successful_login`` to ``ssh_auth_successful``
The ``SSH::Info`` status field has been removed and replaced with
the ``auth_success`` field. This field has been changed from a
string that was previously ``success``, ``failure`` or
``undetermined`` to a boolean that is ``T``, ``F``, or unset.
- The has_valid_octets function now uses a string_vec parameter instead of
string_array.
- conn.log gained a new field local_resp that works like local_orig,
just for the responder address of the connection.
- GRE tunnels are now identified as ``Tunnel::GRE`` instead of
``Tunnel::IP``.
- The default name for extracted files changed from extract-protocol-id
to extract-timestamp-protocol-id.
- The weird named "unmatched_HTTP_reply" has been removed since it can
be detected at the script-layer and is handled correctly by the
default HTTP scripts.
- When adding a logging filter to a stream, the filter can now inherit
a default ``path`` field from the associated ``Log::Stream`` record.
- When adding a logging filter to a stream, the
``Log::default_path_func`` is now only automatically added to the
filter if it has neither a ``path`` nor a ``path_func`` already
explicitly set. Before, the default path function would always be set
for all filters which didn't specify their own ``path_func``.
- BroControl now establishes only one ssh connection from the manager to
each remote host in a cluster configuration (previously, there would be
one ssh connection per remote Bro process).
- BroControl now uses SQLite to record state information instead of a
plain text file (the file "spool/broctl.dat" is no longer used).
On FreeBSD, this means that there is a new dependency on the package
"py27-sqlite3".
- BroControl now records the expected running state of each Bro node right
before each start or stop. The "broctl cron" command uses this info to
either start or stop Bro nodes as needed so that the actual state matches
the expected state (previously, "broctl cron" could only start nodes in
the "crashed" state, and could never stop a node).
- BroControl now sends all normal command output (i.e., not error messages)
to stdout. Error messages are still sent to stderr, however.
- The capability of processing NetFlow input has been removed for the
time being. Therefore, the -y/--flowfile and -Y/--netflow command-line
options have been removed, and the netflow_v5_header and netflow_v5_record
events have been removed.
- The -D/--dfa-size command-line option has been removed.
- The -L/--rule-benchmark command-line option has been removed.
- The -O/--optimize command-line option has been removed.
- The deprecated fields "hot" and "addl" have been removed from the
connection record. Likewise, the functions append_addl() and
append_addl_marker() have been removed.
- Log files now escape non-printable characters consistently as "\xXX".
Furthermore, backslashes are escaped as "\\", making the
representation fully reversible.
Deprecated Functionality
------------------------
@ -76,7 +336,7 @@ Deprecated Functionality
concatenation/extraction functions. Note that the new functions use
0-based indexing, rather than 1-based.
The full list of now deprecation functions is:
The full list of now deprecated functions is:
* split: use split_string instead.

README.rst Symbolic link

@ -0,0 +1 @@
README

VERSION

@ -1 +1 @@
2.3-451
2.4-406

@ -1 +1 @@
Subproject commit 33cb1f8e6bf2e33c2773e86b157e1f343ee85dc6
Subproject commit 424d40c1e8d5888311b50c0e5a9dfc9c5f818b66

@ -1 +1 @@
Subproject commit c9d340847c668590a450f1881e6e3d763abe1138
Subproject commit 105dfe4ad6c4ae4563b21cb0466ee350f0af0d43

@ -1 +1 @@
Subproject commit 1d55a0a84c5b1d0aa1727829300b388c92f92daa
Subproject commit 6ded82da498d805def6aa129cd7691d3b7287c37

@ -1 +1 @@
Subproject commit 76f99ea52c3e021cade3d03eda7865d4f4d1793e
Subproject commit 583f3a3ff1847cf96a87f865d5cf0f36fae9dd67

aux/broker Submodule

@ -0,0 +1 @@
Subproject commit fe35cde8f07ff7cf6decd2fb761cffc32e763d2d

@ -1 +1 @@
Subproject commit 93d4989ed1537e4d143cf09d44077159f869a4b2
Subproject commit 4bea8fa948be2bc86ff92399137131bc1c029b08

@ -1 +1 @@
Subproject commit 71d820e9d8ca753fea8fb34ea3987993b28d79e4
Subproject commit ab61be0c4f128c976f72dfa5a09a87cd842f387a

cmake

@ -1 +1 @@
Subproject commit ff08be5aa1b8eaadbe2775cbc11b499c5f93349e
Subproject commit 537e45afe1006a10f73847fab5f13d28ce43fc4d

configure

@ -41,11 +41,13 @@ Usage: $0 [OPTION]... [VAR=VALUE]...
--enable-perftools-debug use Google's perftools for debugging
--enable-jemalloc link against jemalloc
--enable-ruby build ruby bindings for broccoli (deprecated)
--disable-broker disable use of the Broker communication library
--disable-broccoli don't build or install the Broccoli library
--disable-broctl don't install Broctl
--disable-auxtools don't build or install auxiliary tools
--disable-perftools don't try to build with Google Perftools
--disable-python don't try to build python bindings for broccoli
--disable-pybroker don't try to build python bindings for broker
Required Packages in Non-Standard Locations:
--with-openssl=PATH path to OpenSSL install root
@ -54,19 +56,22 @@ Usage: $0 [OPTION]... [VAR=VALUE]...
--with-binpac=PATH path to BinPAC install root
--with-flex=PATH path to flex executable
--with-bison=PATH path to bison executable
--with-perl=PATH path to perl executable
--with-python=PATH path to Python executable
--with-libcaf=PATH path to C++ Actor Framework installation
(a required Broker dependency)
Optional Packages in Non-Standard Locations:
--with-geoip=PATH path to the libGeoIP install root
--with-perftools=PATH path to Google Perftools install root
--with-jemalloc=PATH path to jemalloc install root
--with-python=PATH path to Python interpreter
--with-python-lib=PATH path to libpython
--with-python-inc=PATH path to Python headers
--with-ruby=PATH path to ruby interpreter
--with-ruby-lib=PATH path to ruby library
--with-ruby-inc=PATH path to ruby headers
--with-swig=PATH path to SWIG executable
--with-rocksdb=PATH path to RocksDB installation
(an optional Broker dependency)
Packaging Options (for developers):
--binary-package toggle special logic for binary packaging
@ -88,7 +93,7 @@ Usage: $0 [OPTION]... [VAR=VALUE]...
sourcedir="$( cd "$( dirname "$0" )" && pwd )"
# Function to append a CMake cache entry definition to the
# CMakeCacheEntries variable
# CMakeCacheEntries variable.
# $1 is the cache entry variable name
# $2 is the cache entry variable type
# $3 is the cache entry variable value
@ -96,6 +101,17 @@ append_cache_entry () {
CMakeCacheEntries="$CMakeCacheEntries -D $1:$2=$3"
}
# Function to remove a CMake cache entry definition from the
# CMakeCacheEntries variable
# $1 is the cache entry variable name
remove_cache_entry () {
CMakeCacheEntries="$CMakeCacheEntries -U $1"
# Even with -U, cmake still warns by default if
# added previously with -D.
CMakeCacheEntries="$CMakeCacheEntries --no-warn-unused-cli"
}
# set defaults
builddir=build
prefix=/usr/local/bro
@ -105,10 +121,13 @@ append_cache_entry BRO_ROOT_DIR PATH $prefix
append_cache_entry PY_MOD_INSTALL_DIR PATH $prefix/lib/broctl
append_cache_entry BRO_SCRIPT_INSTALL_PATH STRING $prefix/share/bro
append_cache_entry BRO_ETC_INSTALL_DIR PATH $prefix/etc
append_cache_entry BROKER_PYTHON_HOME PATH $prefix
append_cache_entry BROKER_PYTHON_BINDINGS BOOL false
append_cache_entry ENABLE_DEBUG BOOL false
append_cache_entry ENABLE_PERFTOOLS BOOL false
append_cache_entry ENABLE_PERFTOOLS_DEBUG BOOL false
append_cache_entry ENABLE_JEMALLOC BOOL false
append_cache_entry ENABLE_BROKER BOOL true
append_cache_entry BinPAC_SKIP_INSTALL BOOL true
append_cache_entry BUILD_SHARED_LIBS BOOL true
append_cache_entry INSTALL_AUX_TOOLS BOOL true
@ -142,6 +161,10 @@ while [ $# -ne 0 ]; do
append_cache_entry CMAKE_INSTALL_PREFIX PATH $optarg
append_cache_entry BRO_ROOT_DIR PATH $optarg
append_cache_entry PY_MOD_INSTALL_DIR PATH $optarg/lib/broctl
if [ -z "$user_disabled_broker" ]; then
append_cache_entry BROKER_PYTHON_HOME PATH $optarg
fi
;;
--scriptdir=*)
append_cache_entry BRO_SCRIPT_INSTALL_PATH STRING $optarg
@ -176,6 +199,11 @@ while [ $# -ne 0 ]; do
--enable-jemalloc)
append_cache_entry ENABLE_JEMALLOC BOOL true
;;
--disable-broker)
append_cache_entry ENABLE_BROKER BOOL false
remove_cache_entry BROKER_PYTHON_HOME
user_disabled_broker="true"
;;
--disable-broccoli)
append_cache_entry INSTALL_BROCCOLI BOOL false
;;
@ -191,11 +219,14 @@ while [ $# -ne 0 ]; do
--disable-python)
append_cache_entry DISABLE_PYTHON_BINDINGS BOOL true
;;
--disable-pybroker)
append_cache_entry DISABLE_PYBROKER BOOL true
;;
--enable-ruby)
append_cache_entry DISABLE_RUBY_BINDINGS BOOL false
;;
--with-openssl=*)
append_cache_entry OpenSSL_ROOT_DIR PATH $optarg
append_cache_entry OPENSSL_ROOT_DIR PATH $optarg
;;
--with-bind=*)
append_cache_entry BIND_ROOT_DIR PATH $optarg
@ -212,9 +243,6 @@ while [ $# -ne 0 ]; do
--with-bison=*)
append_cache_entry BISON_EXECUTABLE PATH $optarg
;;
--with-perl=*)
append_cache_entry PERL_EXECUTABLE PATH $optarg
;;
--with-geoip=*)
append_cache_entry LibGeoIP_ROOT_DIR PATH $optarg
;;
@ -248,6 +276,12 @@ while [ $# -ne 0 ]; do
--with-swig=*)
append_cache_entry SWIG_EXECUTABLE PATH $optarg
;;
--with-libcaf=*)
append_cache_entry LIBCAF_ROOT_DIR PATH $optarg
;;
--with-rocksdb=*)
append_cache_entry ROCKSDB_ROOT_DIR PATH $optarg
;;
--binary-package)
append_cache_entry BINARY_PACKAGING_MODE BOOL true
;;

doc/components/bro-plugins/README

@ -0,0 +1 @@
../../../aux/plugins/README

doc/components/bro-plugins/af_packet/README

@ -0,0 +1 @@
../../../../aux/plugins/af_packet/README

doc/components/bro-plugins/elasticsearch/README

@ -0,0 +1 @@
../../../../aux/plugins/elasticsearch/README

doc/components/bro-plugins/myricom/README

@ -0,0 +1 @@
../../../../aux/plugins/myricom/README

doc/components/bro-plugins/netmap/README

@ -0,0 +1 @@
../../../../aux/plugins/netmap/README

doc/components/bro-plugins/pf_ring/README

@ -0,0 +1 @@
../../../../aux/plugins/pf_ring/README

doc/components/bro-plugins/redis/README

@ -0,0 +1 @@
../../../../aux/plugins/redis/README

doc/components/bro-plugins/tcprs/README

@ -0,0 +1 @@
../../../../aux/plugins/tcprs/README

doc/components/broker/README

@ -0,0 +1 @@
../../../aux/broker/README

doc/components/broker/broker-manual.rst

@ -0,0 +1 @@
../../../aux/broker/broker-manual.rst

doc/components/index.rst

@ -17,8 +17,11 @@ current, independent component releases.
Broccoli - User Manual <broccoli/broccoli-manual>
Broccoli Python Bindings <broccoli-python/README>
Broccoli Ruby Bindings <broccoli-ruby/README>
Broker - Bro's (New) Messaging Library (README) <broker/README>
Broker - User Manual <broker/broker-manual.rst>
BroControl - Interactive Bro management shell <broctl/README>
Bro-Aux - Small auxiliary tools for Bro <bro-aux/README>
Bro-Plugins - A collection of plugins for Bro <bro-plugins/README>
BTest - A unit testing framework <btest/README>
Capstats - Command-line packet statistic tool <capstats/README>
PySubnetTree - Python module for CIDR lookups <pysubnettree/README>

doc/conf.py

@ -66,7 +66,7 @@ master_doc = 'index'
# General information about the project.
project = u'Bro'
copyright = u'2013, The Bro Project'
copyright = u'2016, The Bro Project'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the

doc/devel/plugins.rst

@ -3,7 +3,7 @@
Writing Bro Plugins
===================
Bro internally provides plugin API that enables extending
Bro internally provides a plugin API that enables extending
the system dynamically, without modifying the core code base. That way
custom code remains self-contained and can be maintained, compiled,
and installed independently. Currently, plugins can add the following
@ -32,7 +32,7 @@ Quick Start
===========
Writing a basic plugin is quite straight-forward as long as one
follows a few conventions. In the following we walk a simple example
follows a few conventions. In the following we create a simple example
plugin that adds a new built-in function (bif) to Bro: we'll add
``rot13(s: string) : string``, a function that rotates every character
in a string by 13 places.
@ -81,7 +81,7 @@ The syntax of this file is just like any other ``*.bif`` file; we
won't go into it here.
Now we can already compile our plugin, we just need to tell the
configure script that ``init-plugin`` put in place where the Bro
configure script (that ``init-plugin`` created) where the Bro
source tree is located (Bro needs to have been built there first)::
# cd rot13-plugin
@ -99,7 +99,7 @@ option::
# export BRO_PLUGIN_PATH=/path/to/rot13-plugin/build
# bro -N
[...]
Plugin: Demo::Rot13 - <Insert brief description of plugin> (dynamic, version 1)
Demo::Rot13 - <Insert description> (dynamic, version 0.1)
[...]
That looks quite good, except for the dummy description that we should
@ -108,28 +108,30 @@ is about. We do this by editing the ``config.description`` line in
``src/Plugin.cc``, like this::
[...]
plugin::Configuration Configure()
plugin::Configuration Plugin::Configure()
{
plugin::Configuration config;
config.name = "Demo::Rot13";
config.description = "Caesar cipher rotating a string's characters by 13 places.";
config.version.major = 1;
config.version.minor = 0;
config.version.major = 0;
config.version.minor = 1;
return config;
}
[...]
Now rebuild and verify that the description is visible::
# make
[...]
# bro -N | grep Rot13
Plugin: Demo::Rot13 - Caesar cipher rotating a string's characters by 13 places. (dynamic, version 1)
Demo::Rot13 - Caesar cipher rotating a string's characters by 13 places. (dynamic, version 0.1)
Better. Bro can also show us what exactly the plugin provides with the
Bro can also show us what exactly the plugin provides with the
more verbose option ``-NN``::
# bro -NN
[...]
Plugin: Demo::Rot13 - Caesar cipher rotating a string's characters by 13 places. (dynamic, version 1)
Demo::Rot13 - Caesar cipher rotating a string's characters by 13 places. (dynamic, version 0.1)
[Function] Demo::rot13
[...]
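
With the plugin activated, the new bif can be called from a script
like any other built-in function; a quick sketch::

    event bro_init()
        {
        print Demo::rot13("Hello");   # prints "Uryyb"
        }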
@ -157,10 +159,12 @@ The installed version went into
``<bro-install-prefix>/lib/bro/plugins/Demo_Rot13``.
One can distribute the plugin independently of Bro for others to use.
To distribute in source form, just remove the ``build/`` (``make
distclean`` does that) and then tar up the whole ``rot13-plugin/``
To distribute in source form, just remove the ``build/`` directory
(``make distclean`` does that) and then tar up the whole ``rot13-plugin/``
directory. Others then follow the same process as above after
unpacking. To distribute the plugin in binary form, the build process
unpacking.
To distribute the plugin in binary form, the build process
conveniently creates a corresponding tarball in ``build/dist/``. In
this case, it's called ``Demo_Rot13-0.1.tar.gz``, with the version
number coming out of the ``VERSION`` file that ``init-plugin`` put
@ -169,14 +173,14 @@ plugin, but no further source files. Optionally, one can include
further files by specifying them in the plugin's ``CMakeLists.txt``
through the ``bro_plugin_dist_files`` macro; the skeleton does that
for ``README``, ``VERSION``, ``CHANGES``, and ``COPYING``. To use the
plugin through the binary tarball, just unpack it and point
``BRO_PLUGIN_PATH`` there; or copy it into
``<bro-install-prefix>/lib/bro/plugins/`` directly.
plugin through the binary tarball, just unpack it into
``<bro-install-prefix>/lib/bro/plugins/``. Alternatively, if you unpack
it in another location, then you need to point ``BRO_PLUGIN_PATH`` there.
Before distributing your plugin, you should edit some of the meta
files that ``init-plugin`` puts in place. Edit ``README`` and
``VERSION``, and update ``CHANGES`` when you make changes. Also put a
license file in place as ``COPYING``; if BSD is fine, you find a
license file in place as ``COPYING``; if BSD is fine, you will find a
template in ``COPYING.edit-me``.
Plugin Directory Layout
@ -193,7 +197,7 @@ directory. With the skeleton, ``<base>`` corresponds to ``build/``.
must exist, and its content must consist of a single line with the
qualified name of the plugin (e.g., "Demo::Rot13").
``<base>/lib/<plugin-name>-<os>-<arch>.so``
``<base>/lib/<plugin-name>.<os>-<arch>.so``
The shared library containing the plugin's compiled code. Bro will
load this in dynamically at run-time if OS and architecture match
the current platform.
@ -205,8 +209,15 @@ directory. With the skeleton, ``<base>`` corresponds to ``build/``.
"@load"ed.
``scripts``/__load__.bro
A Bro script that will be loaded immediately when the plugin gets
activated. See below for more information on activating plugins.
A Bro script that will be loaded when the plugin gets activated.
When this script executes, any BiF elements that the plugin
defines will already be available. See below for more information
on activating plugins.
``scripts``/__preload__.bro
A Bro script that will be loaded when the plugin gets activated,
but before any BiF elements become available. See below for more
information on activating plugins.
``lib/bif/``
Directory with auto-generated Bro scripts that declare the plugin's
@ -215,8 +226,8 @@ directory. With the skeleton, ``<base>`` corresponds to ``build/``.
Any other files in ``<base>`` are ignored by Bro.
By convention, a plugin should put its custom scripts into sub folders
of ``scripts/``, i.e., ``scripts/<script-namespace>/<script>.bro`` to
avoid conflicts. As usual, you can then put a ``__load__.bro`` in
of ``scripts/``, i.e., ``scripts/<plugin-namespace>/<plugin-name>/<script>.bro``
to avoid conflicts. As usual, you can then put a ``__load__.bro`` in
there as well so that, e.g., ``@load Demo/Rot13`` could load a whole
module in the form of multiple individual scripts.
@ -242,16 +253,30 @@ as well as the ``__bro_plugin__`` magic file and any further
distribution files specified in ``CMakeLists.txt`` (e.g., README,
VERSION). You can find a full list of files installed in
``build/MANIFEST``. Behind the scenes, ``make install`` really just
copies over the binary tarball in ``build/dist``.
unpacks the binary tarball from ``build/dist`` into the destination
directory.
``init-plugin`` will never overwrite existing files. If its target
directory already exists, it will be default decline to do anything.
directory already exists, it will by default decline to do anything.
You can run it with ``-u`` instead to update an existing plugin,
however it will never overwrite any existing files; it will only put
in place files it doesn't find yet. To revert a file back to what
``init-plugin`` created originally, delete it first and then rerun
with ``-u``.
``init-plugin`` puts a ``configure`` script in place that wraps
``cmake`` with a more familiar configure-style configuration. By
default, the script provides two options for specifying paths to the
Bro source (``--bro-dist``) and to the plugin's installation directory
(``--install-root``). To extend ``configure`` with plugin-specific
options (such as search paths for its dependencies) don't edit the
script directly but instead extend ``configure.plugin``, which
``configure`` includes. That way you will be able to more easily
update ``configure`` in the future when the distribution version
changes. In ``configure.plugin`` you can use the predefined shell
function ``append_cache_entry`` to seed values into the CMake cache;
see the installed skeleton version and existing plugins for examples.
Activating a Plugin
===================
@ -261,7 +286,9 @@ Activating a plugin will:
1. Load the dynamic module
2. Make any bif items available
3. Add the ``scripts/`` directory to ``BROPATH``
4. Load ``scripts/__load__.bro``
4. Load ``scripts/__preload__.bro``
5. Make BiF elements available to scripts.
6. Load ``scripts/__load__.bro``
By default, Bro will automatically activate all dynamic plugins found
in its search path ``BRO_PLUGIN_PATH``. However, in bare mode (``bro
@ -356,18 +383,19 @@ Testing Plugins
===============
A plugin should come with a test suite to exercise its functionality.
The ``init-plugin`` script puts in place a basic </btest/README> setup
The ``init-plugin`` script puts in place a basic
:doc:`BTest <../../components/btest/README>` setup
to start with. Initially, it comes with a single test that just checks
that Bro loads the plugin correctly. It won't have a baseline yet, so
let's get that in place::
# cd tests
# btest -d
[ 0%] plugin.loading ... failed
[ 0%] rot13.show-plugin ... failed
% 'btest-diff output' failed unexpectedly (exit code 100)
% cat .diag
== File ===============================
Demo::Rot13 - Caesar cipher rotating a string's characters by 13 places. (dynamic, version 1.0)
Demo::Rot13 - Caesar cipher rotating a string's characters by 13 places. (dynamic, version 0.1)
[Function] Demo::rot13
== Error ===============================
@ -400,8 +428,8 @@ correctly::
Check the output::
# btest -d plugin/rot13.bro
[ 0%] plugin.rot13 ... failed
# btest -d rot13/bif-rot13.bro
[ 0%] rot13.bif-rot13 ... failed
% 'btest-diff output' failed unexpectedly (exit code 100)
% cat .diag
== File ===============================
@ -416,7 +444,7 @@ Check the output::
Install the baseline::
# btest -U plugin/rot13.bro
# btest -U rot13/bif-rot13.bro
all 1 tests successful
Run the test-suite::
@ -444,7 +472,7 @@ your plugin's debugging output with ``-B plugin-<name>``, where
``<name>`` is the name of the plugin as returned by its
``Configure()`` method, yet with the namespace-separator ``::``
replaced with a simple dash. Example: If the plugin is called
``Bro::Demo``, use ``-B plugin-Bro-Demo``. As usual, the debugging
``Demo::Rot13``, use ``-B plugin-Demo-Rot13``. As usual, the debugging
output will be recorded to ``debug.log`` if Bro's compiled in debug
mode.

doc/frameworks/broker.rst Normal file

@ -0,0 +1,200 @@
.. _brokercomm-framework:
======================================
Broker-Enabled Communication Framework
======================================
.. rst-class:: opening
Bro can now use the `Broker Library
<../components/broker/README.html>`_ to exchange information with
other Bro processes.
.. contents::
Connecting to Peers
===================
Communication via Broker must first be turned on via
:bro:see:`BrokerComm::enable`.
Bro can accept incoming connections by calling :bro:see:`BrokerComm::listen`
and then monitor connection status updates via the
:bro:see:`BrokerComm::incoming_connection_established` and
:bro:see:`BrokerComm::incoming_connection_broken` events.
.. btest-include:: ${DOC_ROOT}/frameworks/broker/connecting-listener.bro
Bro can initiate outgoing connections by calling :bro:see:`BrokerComm::connect`
and then monitor connection status updates via the
:bro:see:`BrokerComm::outgoing_connection_established`,
:bro:see:`BrokerComm::outgoing_connection_broken`, and
:bro:see:`BrokerComm::outgoing_connection_incompatible` events.
.. btest-include:: ${DOC_ROOT}/frameworks/broker/connecting-connector.bro
Remote Printing
===============
To receive remote print messages, first use the
:bro:see:`BrokerComm::subscribe_to_prints` function to advertise to peers a
topic prefix of interest and then create an event handler for
:bro:see:`BrokerComm::print_handler` to handle any print messages that are
received.
.. btest-include:: ${DOC_ROOT}/frameworks/broker/printing-listener.bro
To send remote print messages, just call :bro:see:`BrokerComm::print`.
.. btest-include:: ${DOC_ROOT}/frameworks/broker/printing-connector.bro
Notice that the subscriber only used the prefix "bro/print/", but is
able to receive messages with full topics of "bro/print/hi",
"bro/print/stuff", and "bro/print/bye". The model here is that the
publisher of a message checks for all subscribers who advertised
interest in a prefix of that message's topic and sends it to them.
Message Format
--------------
For other applications that want to exchange print messages with Bro,
the Broker message format is simply:
.. code:: c++
broker::message{std::string{}};
Remote Events
=============
Receiving remote events is similar to remote prints. Just use the
:bro:see:`BrokerComm::subscribe_to_events` function and possibly define any
new events along with handlers that peers may want to send.
.. btest-include:: ${DOC_ROOT}/frameworks/broker/events-listener.bro
There are two different ways to send events. The first is to call the
:bro:see:`BrokerComm::event` function directly. The second option is to call
the :bro:see:`BrokerComm::auto_event` function where you specify a
particular event that will be automatically sent to peers whenever the
event is called locally via the normal event invocation syntax.
.. btest-include:: ${DOC_ROOT}/frameworks/broker/events-connector.bro
Again, the subscription model is prefix-based.
Message Format
--------------
For other applications that want to exchange event messages with Bro,
the Broker message format is:
.. code:: c++
broker::message{std::string{}, ...};
The first parameter is the name of the event and the remaining ``...``
are its arguments, which are any of the supported Broker data types as
they correspond to the Bro types for the event named in the first
parameter of the message.
Remote Logging
==============
.. btest-include:: ${DOC_ROOT}/frameworks/broker/testlog.bro
Use the :bro:see:`BrokerComm::subscribe_to_logs` function to advertise interest
in logs written by peers. The topic names that Bro uses are implicitly of the
form "bro/log/<stream-name>".
.. btest-include:: ${DOC_ROOT}/frameworks/broker/logs-listener.bro
To send remote logs either redef :bro:see:`Log::enable_remote_logging` or
use the :bro:see:`BrokerComm::enable_remote_logs` function. The former
allows any log stream to be sent to peers while the latter enables remote
logging for particular streams.
.. btest-include:: ${DOC_ROOT}/frameworks/broker/logs-connector.bro
Message Format
--------------
For other applications that want to exchange log messages with Bro,
the Broker message format is:
.. code:: c++
broker::message{broker::enum_value{}, broker::record{}};
The enum value corresponds to the stream's :bro:see:`Log::ID` value, and
the record corresponds to a single entry of that log's columns record,
in this case a ``Test::Info`` value.
Tuning Access Control
=====================
By default, endpoints do not restrict the message topics that they send
to peers and do not restrict what message topics and data store
identifiers get advertised to peers. These are the default
:bro:see:`BrokerComm::EndpointFlags` supplied to :bro:see:`BrokerComm::enable`.
If not using the ``auto_publish`` flag, one can use the
:bro:see:`BrokerComm::publish_topic` and :bro:see:`BrokerComm::unpublish_topic`
functions to manipulate the set of message topics (must match exactly)
that are allowed to be sent to peer endpoints. These settings take
precedence over the per-message ``peers`` flag supplied to functions
that take a :bro:see:`BrokerComm::SendFlags` such as :bro:see:`BrokerComm::print`,
:bro:see:`BrokerComm::event`, :bro:see:`BrokerComm::auto_event` or
:bro:see:`BrokerComm::enable_remote_logs`.
If not using the ``auto_advertise`` flag, one can use the
:bro:see:`BrokerComm::advertise_topic` and
:bro:see:`BrokerComm::unadvertise_topic` functions
to manipulate the set of topic prefixes that are allowed to be
advertised to peers. If an endpoint does not advertise a topic prefix, then
the only way peers can send messages to it is via the ``unsolicited``
flag of :bro:see:`BrokerComm::SendFlags` and choosing a topic with a matching
prefix (i.e., the full topic may be longer than the receiver's prefix;
just the prefix needs to match).
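
A sketch of tuning these flags; the ``EndpointFlags`` field names are
assumptions derived from the flag names above and should be checked
against the actual record definition:

.. code:: bro

    event bro_init()
        {
        # Assumed field names: turn off automatic publication and
        # advertisement...
        BrokerComm::enable([$auto_publish=F, $auto_advertise=F]);

        # ...and whitelist topics explicitly instead.
        BrokerComm::publish_topic("bro/print/hi");
        BrokerComm::advertise_topic("bro/print/");
        }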
Distributed Data Stores
=======================
There are three flavors of key-value data store interfaces: master,
clone, and frontend.
A frontend is the common interface to query and modify data stores.
That is, a clone is a specific type of frontend and a master is also a
specific type of frontend, but a standalone frontend can also exist to
e.g. query and modify the contents of a remote master store without
actually "owning" any of the contents itself.
A master data store can be cloned from remote peers which may then
perform lightweight, local queries against the clone, which
automatically stays synchronized with the master store. Clones cannot
modify their content directly, instead they send modifications to the
centralized master store which applies them and then broadcasts them to
all clones.
Master and clone stores get to choose what type of storage backend to
use. E.g. In-memory versus SQLite for persistence. Note that if clones
are used, then data store sizes must be able to fit within memory
regardless of the storage backend as a single snapshot of the master
store is sent in a single chunk to initialize the clone.
Data stores also support expiration on a per-key basis either using an
absolute point in time or a relative amount of time since the entry's
last modification time.
.. btest-include:: ${DOC_ROOT}/frameworks/broker/stores-listener.bro
.. btest-include:: ${DOC_ROOT}/frameworks/broker/stores-connector.bro
In the above example, if a local copy of the store contents isn't
needed, just replace the :bro:see:`BrokerStore::create_clone` call with
:bro:see:`BrokerStore::create_frontend`. Queries will then be made against
the remote master store instead of the local clone.
Note that all data store queries must be made within Bro's asynchronous
``when`` statements and must specify a timeout block.

doc/frameworks/broker/connecting-connector.bro

@ -0,0 +1,18 @@
const broker_port: port = 9999/tcp &redef;
redef exit_only_after_terminate = T;
redef BrokerComm::endpoint_name = "connector";
event bro_init()
{
BrokerComm::enable();
BrokerComm::connect("127.0.0.1", broker_port, 1sec);
}
event BrokerComm::outgoing_connection_established(peer_address: string,
peer_port: port,
peer_name: string)
{
print "BrokerComm::outgoing_connection_established",
peer_address, peer_port, peer_name;
terminate();
}

doc/frameworks/broker/connecting-listener.bro

@ -0,0 +1,20 @@
const broker_port: port = 9999/tcp &redef;
redef exit_only_after_terminate = T;
redef BrokerComm::endpoint_name = "listener";
event bro_init()
{
BrokerComm::enable();
BrokerComm::listen(broker_port, "127.0.0.1");
}
event BrokerComm::incoming_connection_established(peer_name: string)
{
print "BrokerComm::incoming_connection_established", peer_name;
}
event BrokerComm::incoming_connection_broken(peer_name: string)
{
print "BrokerComm::incoming_connection_broken", peer_name;
terminate();
}

doc/frameworks/broker/events-connector.bro

@ -0,0 +1,31 @@
const broker_port: port = 9999/tcp &redef;
redef exit_only_after_terminate = T;
redef BrokerComm::endpoint_name = "connector";
global my_event: event(msg: string, c: count);
global my_auto_event: event(msg: string, c: count);
event bro_init()
{
BrokerComm::enable();
BrokerComm::connect("127.0.0.1", broker_port, 1sec);
BrokerComm::auto_event("bro/event/my_auto_event", my_auto_event);
}
event BrokerComm::outgoing_connection_established(peer_address: string,
peer_port: port,
peer_name: string)
{
print "BrokerComm::outgoing_connection_established",
peer_address, peer_port, peer_name;
BrokerComm::event("bro/event/my_event", BrokerComm::event_args(my_event, "hi", 0));
event my_auto_event("stuff", 88);
BrokerComm::event("bro/event/my_event", BrokerComm::event_args(my_event, "...", 1));
event my_auto_event("more stuff", 51);
BrokerComm::event("bro/event/my_event", BrokerComm::event_args(my_event, "bye", 2));
}
event BrokerComm::outgoing_connection_broken(peer_address: string,
peer_port: port)
{
terminate();
}

doc/frameworks/broker/events-listener.bro

@ -0,0 +1,36 @@
const broker_port: port = 9999/tcp &redef;
redef exit_only_after_terminate = T;
redef BrokerComm::endpoint_name = "listener";
global msg_count = 0;
global my_event: event(msg: string, c: count);
global my_auto_event: event(msg: string, c: count);
event bro_init()
{
BrokerComm::enable();
BrokerComm::subscribe_to_events("bro/event/");
BrokerComm::listen(broker_port, "127.0.0.1");
}
event BrokerComm::incoming_connection_established(peer_name: string)
{
print "BrokerComm::incoming_connection_established", peer_name;
}
event my_event(msg: string, c: count)
{
++msg_count;
print "got my_event", msg, c;
if ( msg_count == 5 )
terminate();
}
event my_auto_event(msg: string, c: count)
{
++msg_count;
print "got my_auto_event", msg, c;
if ( msg_count == 5 )
terminate();
}

doc/frameworks/broker/logs-connector.bro

@ -0,0 +1,40 @@
@load ./testlog
const broker_port: port = 9999/tcp &redef;
redef exit_only_after_terminate = T;
redef BrokerComm::endpoint_name = "connector";
redef Log::enable_local_logging = F;
redef Log::enable_remote_logging = F;
global n = 0;
event bro_init()
{
BrokerComm::enable();
BrokerComm::enable_remote_logs(Test::LOG);
BrokerComm::connect("127.0.0.1", broker_port, 1sec);
}
event do_write()
{
if ( n == 6 )
return;
Log::write(Test::LOG, [$msg = "ping", $num = n]);
++n;
event do_write();
}
event BrokerComm::outgoing_connection_established(peer_address: string,
peer_port: port,
peer_name: string)
{
print "BrokerComm::outgoing_connection_established",
peer_address, peer_port, peer_name;
event do_write();
}
event BrokerComm::outgoing_connection_broken(peer_address: string,
peer_port: port)
{
terminate();
}

doc/frameworks/broker/logs-listener.bro

@ -0,0 +1,25 @@
@load ./testlog
const broker_port: port = 9999/tcp &redef;
redef exit_only_after_terminate = T;
redef BrokerComm::endpoint_name = "listener";
event bro_init()
{
BrokerComm::enable();
BrokerComm::subscribe_to_logs("bro/log/Test::LOG");
BrokerComm::listen(broker_port, "127.0.0.1");
}
event BrokerComm::incoming_connection_established(peer_name: string)
{
print "BrokerComm::incoming_connection_established", peer_name;
}
event Test::log_test(rec: Test::Info)
{
print "wrote log", rec;
if ( rec$num == 5 )
terminate();
}

doc/frameworks/broker/printing-connector.bro

@ -0,0 +1,26 @@
const broker_port: port = 9999/tcp &redef;
redef exit_only_after_terminate = T;
redef BrokerComm::endpoint_name = "connector";
event bro_init()
{
BrokerComm::enable();
BrokerComm::connect("127.0.0.1", broker_port, 1sec);
}
event BrokerComm::outgoing_connection_established(peer_address: string,
peer_port: port,
peer_name: string)
{
print "BrokerComm::outgoing_connection_established",
peer_address, peer_port, peer_name;
BrokerComm::print("bro/print/hi", "hello");
BrokerComm::print("bro/print/stuff", "...");
BrokerComm::print("bro/print/bye", "goodbye");
}
event BrokerComm::outgoing_connection_broken(peer_address: string,
peer_port: port)
{
terminate();
}

doc/frameworks/broker/printing-listener.bro

@ -0,0 +1,25 @@
const broker_port: port = 9999/tcp &redef;
redef exit_only_after_terminate = T;
redef BrokerComm::endpoint_name = "listener";
global msg_count = 0;
event bro_init()
{
BrokerComm::enable();
BrokerComm::subscribe_to_prints("bro/print/");
BrokerComm::listen(broker_port, "127.0.0.1");
}
event BrokerComm::incoming_connection_established(peer_name: string)
{
print "BrokerComm::incoming_connection_established", peer_name;
}
event BrokerComm::print_handler(msg: string)
{
++msg_count;
print "got print message", msg;
if ( msg_count == 3 )
terminate();
}

doc/frameworks/broker/stores-connector.bro

@ -0,0 +1,53 @@
const broker_port: port = 9999/tcp &redef;
redef exit_only_after_terminate = T;
global h: opaque of BrokerStore::Handle;
function dv(d: BrokerComm::Data): BrokerComm::DataVector
{
local rval: BrokerComm::DataVector;
rval[0] = d;
return rval;
}
global ready: event();
event BrokerComm::outgoing_connection_broken(peer_address: string,
peer_port: port)
{
terminate();
}
event BrokerComm::outgoing_connection_established(peer_address: string,
peer_port: port,
peer_name: string)
{
local myset: set[string] = {"a", "b", "c"};
local myvec: vector of string = {"alpha", "beta", "gamma"};
h = BrokerStore::create_master("mystore");
BrokerStore::insert(h, BrokerComm::data("one"), BrokerComm::data(110));
BrokerStore::insert(h, BrokerComm::data("two"), BrokerComm::data(223));
BrokerStore::insert(h, BrokerComm::data("myset"), BrokerComm::data(myset));
BrokerStore::insert(h, BrokerComm::data("myvec"), BrokerComm::data(myvec));
BrokerStore::increment(h, BrokerComm::data("one"));
BrokerStore::decrement(h, BrokerComm::data("two"));
BrokerStore::add_to_set(h, BrokerComm::data("myset"), BrokerComm::data("d"));
BrokerStore::remove_from_set(h, BrokerComm::data("myset"), BrokerComm::data("b"));
BrokerStore::push_left(h, BrokerComm::data("myvec"), dv(BrokerComm::data("delta")));
BrokerStore::push_right(h, BrokerComm::data("myvec"), dv(BrokerComm::data("omega")));
when ( local res = BrokerStore::size(h) )
{
print "master size", res;
event ready();
}
timeout 10sec
{ print "timeout"; }
}
event bro_init()
{
BrokerComm::enable();
BrokerComm::connect("127.0.0.1", broker_port, 1secs);
BrokerComm::auto_event("bro/event/ready", ready);
}

doc/frameworks/broker/stores-listener.bro

@ -0,0 +1,43 @@
const broker_port: port = 9999/tcp &redef;
redef exit_only_after_terminate = T;
global h: opaque of BrokerStore::Handle;
global expected_key_count = 4;
global key_count = 0;
function do_lookup(key: string)
{
when ( local res = BrokerStore::lookup(h, BrokerComm::data(key)) )
{
++key_count;
print "lookup", key, res;
if ( key_count == expected_key_count )
terminate();
}
timeout 10sec
{ print "timeout", key; }
}
event ready()
{
h = BrokerStore::create_clone("mystore");
when ( local res = BrokerStore::keys(h) )
{
print "clone keys", res;
do_lookup(BrokerComm::refine_to_string(BrokerComm::vector_lookup(res$result, 0)));
do_lookup(BrokerComm::refine_to_string(BrokerComm::vector_lookup(res$result, 1)));
do_lookup(BrokerComm::refine_to_string(BrokerComm::vector_lookup(res$result, 2)));
do_lookup(BrokerComm::refine_to_string(BrokerComm::vector_lookup(res$result, 3)));
}
timeout 10sec
{ print "timeout"; }
}
event bro_init()
{
BrokerComm::enable();
BrokerComm::subscribe_to_events("bro/event/ready");
BrokerComm::listen(broker_port, "127.0.0.1");
}

doc/frameworks/broker/testlog.bro

@ -0,0 +1,18 @@
module Test;
export {
redef enum Log::ID += { LOG };
type Info: record {
msg: string &log;
num: count &log;
};
global log_test: event(rec: Test::Info);
}
event bro_init() &priority=5
{
BrokerComm::enable();
Log::create_stream(Test::LOG, [$columns=Test::Info, $ev=log_test, $path="test"]);
}


@ -1,7 +1,8 @@
event file_mime_type(f: fa_file, mime_type: string)
event file_sniff(f: fa_file, meta: fa_metadata)
{
if ( ! meta?$mime_type ) return;
print "new file", f$id;
if ( mime_type == "text/plain" )
if ( meta$mime_type == "text/plain" )
Files::add_analyzer(f, Files::ANALYZER_MD5);
}

doc/frameworks/geoip.rst

@ -20,11 +20,13 @@ GeoLocation
Install libGeoIP
----------------
Before building Bro, you need to install libGeoIP.
* FreeBSD:
.. console::
sudo pkg_add -r GeoIP
sudo pkg install GeoIP
* RPM/RedHat-based Linux:
@ -40,80 +42,99 @@ Install libGeoIP
* Mac OS X:
Vanilla OS X installations don't ship with libGeoIP, but if
installed from your preferred package management system (e.g.
MacPorts, Fink, or Homebrew), they should be automatically detected
and Bro will compile against them.
You need to install from your preferred package management system
(e.g. MacPorts, Fink, or Homebrew). The name of the package that you need
may be libgeoip, geoip, or geoip-dev, depending on which package management
system you are using.
GeoIPLite Database Installation
------------------------------------
-------------------------------
A country database for GeoIPLite is included when you do the C API
install, but for Bro, we are using the city database which includes
cities and regions in addition to countries.
`Download <http://www.maxmind.com/app/geolitecity>`__ the GeoLite city
binary database.
binary database:
.. console::
wget http://geolite.maxmind.com/download/geoip/database/GeoLiteCity.dat.gz
gunzip GeoLiteCity.dat.gz
Next, the file needs to be put in the database directory. This directory
should already exist and will vary depending on which platform and package
you are using. For FreeBSD, use ``/usr/local/share/GeoIP``. For Linux,
use ``/usr/share/GeoIP`` or ``/var/lib/GeoIP`` (choose whichever one
Next, the file needs to be renamed and put in the GeoIP database directory.
This directory should already exist and will vary depending on which platform
and package you are using. For FreeBSD, use ``/usr/local/share/GeoIP``. For
Linux, use ``/usr/share/GeoIP`` or ``/var/lib/GeoIP`` (choose whichever one
already exists).
.. console::
mv GeoLiteCity.dat <path_to_database_dir>/GeoIPCity.dat
Note that there is a separate database for IPv6 addresses, which can also
be installed if you want GeoIP functionality for IPv6.
Testing
-------
Before using the GeoIP functionality, it is a good idea to verify that
everything is setup correctly. After installing libGeoIP and the GeoIP city
database, and building Bro, you can quickly check if the GeoIP functionality
works by running a command like this:
.. console::
bro -e "print lookup_location(8.8.8.8);"
If you see an error message similar to "Failed to open GeoIP City database",
then you may need to either rename or move your GeoIP city database file (the
error message should give you the full pathname of the database file that
Bro is looking for).
If you see an error message similar to "Bro was not configured for GeoIP
support", then you need to rebuild Bro and make sure it is linked against
libGeoIP. Normally, if libGeoIP is installed correctly then it should
automatically be found when building Bro. If this doesn't happen, then
you may need to specify the path to the libGeoIP installation
(e.g. ``./configure --with-geoip=<path>``).
Usage
-----
There is a single built in function that provides the GeoIP
functionality:
There is a built-in function that provides the GeoIP functionality:
.. code:: bro
function lookup_location(a:addr): geo_location
There is also the :bro:see:`geo_location` data structure that is returned
from the :bro:see:`lookup_location` function:
.. code:: bro
type geo_location: record {
country_code: string;
region: string;
city: string;
latitude: double;
longitude: double;
};
The return value of the :bro:see:`lookup_location` function is a record
type called :bro:see:`geo_location`, and it consists of several fields
containing the country, region, city, latitude, and longitude of the specified
IP address. Since one or more fields in this record will be uninitialized
for some IP addresses (for example, the country and region of an IP address
might be known, but the city could be unknown), a field should be checked
if it has a value before trying to access the value.
Example
-------
To write a line in a log file for every ftp connection from hosts in
Ohio, this is now very easy:
To show every ftp connection from hosts in Ohio, this is now very easy:
.. code:: bro

    event ftp_reply(c: connection, code: count, msg: string, cont_resp: bool)
        {
        local client = c$id$orig_h;
        local loc = lookup_location(client);

        if ( loc?$region && loc$region == "OH" && loc$country_code == "US" )
            {
            local city = loc?$city ? loc$city : "<unknown>";

            print fmt("FTP Connection from:%s (%s,%s,%s)", client, city,
                      loc$region, loc$country_code);
            }
        }

View file

@ -14,4 +14,4 @@ Frameworks
notice
signatures
sumstats
broker

View file

@ -32,7 +32,8 @@ For this example we assume that we want to import data from a blacklist
that contains server IP addresses as well as the timestamp and the reason
for the block.
An example input file could look like this (note that all fields must be
tab-separated):
::
@ -63,19 +64,23 @@ The two records are defined as:
reason: string;
};
Note that the names of the fields in the record definitions must correspond
to the column names listed in the '#fields' line of the log file, in this
case 'ip', 'timestamp', and 'reason'. Also note that the ordering of the
columns does not matter, because each column is identified by name.
The log file is read into the table with a simple call of the
:bro:id:`Input::add_table` function:
.. code:: bro
    global blacklist: table[addr] of Val = table();

    event bro_init() {
        Input::add_table([$source="blacklist.file", $name="blacklist",
                          $idx=Idx, $val=Val, $destination=blacklist]);
        Input::remove("blacklist");
    }
With these three lines we first create an empty table that should contain the
blacklist data and then instruct the input framework to open an input stream
@ -92,7 +97,7 @@ Because of this, the data is not immediately accessible. Depending on the
size of the data source it might take from a few milliseconds up to a few
seconds until all data is present in the table. Please note that this means
that when Bro is running without an input source or on very short captured
files, it might terminate before the data is present in the table (because
Bro already handled all packets before the import thread finished).
Subsequent calls to an input source are queued until the previous action has
@ -101,8 +106,8 @@ been completed. Because of this, it is, for example, possible to call
will remain queued until the first read has been completed.
Once the input framework finishes reading from a data source, it fires
the :bro:id:`Input::end_of_data` event. Once this event has been received all
data from the input file is available in the table.
.. code:: bro
@ -111,9 +116,9 @@ from the input file is available in the table.
    event Input::end_of_data(name: string, source: string)
        {
        print blacklist;
        }
The table can be used while the data is still being read - it
just might not contain all lines from the input file before the event has
fired. After the table has been populated it can be used like any other Bro
table and blacklist entries can easily be tested:
.. code:: bro
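    # A sketch of the elided membership test; the address is illustrative.
    if ( 192.168.18.12 in blacklist )
        print "blacklisted!";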
@ -130,10 +135,11 @@ changing. For these cases, the Bro input framework supports several ways to
deal with changing data files.
The first, very basic method is an explicit refresh of an input stream. When
an input stream is open (this means it has not yet been removed by a call to
:bro:id:`Input::remove`), the function :bro:id:`Input::force_update` can be
called. This will trigger a complete refresh of the table; any changed
elements from the file will be updated. After the update is finished the
:bro:id:`Input::end_of_data` event will be raised.
In our example the call would look like:
@ -141,30 +147,35 @@ In our example the call would look like:
    Input::force_update("blacklist");
Alternatively, the input framework can automatically refresh the table
contents when it detects a change to the input file. To use this feature,
you need to specify a non-default read mode by setting the ``mode`` option
of the :bro:id:`Input::add_table` call. Valid values are ``Input::MANUAL``
(the default), ``Input::REREAD`` and ``Input::STREAM``. For example,
setting the value of the ``mode`` option in the previous example
would look like this:

.. code:: bro

    Input::add_table([$source="blacklist.file", $name="blacklist",
                      $idx=Idx, $val=Val, $destination=blacklist,
                      $mode=Input::REREAD]);

When using the reread mode (i.e., ``$mode=Input::REREAD``), Bro continually
checks if the input file has been changed. If the file has been changed, it
is re-read and the data in the Bro table is updated to reflect the current
state. Each time a change has been detected and all the new data has been
read into the table, the ``end_of_data`` event is raised.

When using the streaming mode (i.e., ``$mode=Input::STREAM``), Bro assumes
that the source data file is an append-only file to which new data is
continually appended. Bro continually checks for new data at the end of
the file and will add the new data to the table. If newer lines in the
file have the same index as previous lines, they will overwrite the
values in the output table. Because of the nature of streaming reads
(data is continually added to the table), the ``end_of_data`` event
is never raised when using streaming reads.

Hence, when adding ``$mode=Input::REREAD`` to the previous example, the
blacklist table will always reflect the state of the blacklist input file.
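A streaming variant of the same call just swaps the mode (a sketch):

.. code:: bro

    Input::add_table([$source="blacklist.file", $name="blacklist",
                      $idx=Idx, $val=Val, $destination=blacklist,
                      $mode=Input::STREAM]);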
Receiving change events
-----------------------
@ -173,34 +184,40 @@ When re-reading files, it might be interesting to know exactly which lines in
the source files have changed.
For this reason, the input framework can raise an event each time when a data
item is added to, removed from, or changed in a table.
The event definition looks like this (note that you can change the name of
this event in your own Bro script):
.. code:: bro
    event entry(description: Input::TableDescription, tpe: Input::Event,
                left: Idx, right: Val) {
        # do something here...
        print fmt("%s = %s", left, right);
    }
The event must be specified in ``$ev`` in the ``add_table`` call:
.. code:: bro
    Input::add_table([$source="blacklist.file", $name="blacklist",
                      $idx=Idx, $val=Val, $destination=blacklist,
                      $mode=Input::REREAD, $ev=entry]);
The ``description`` argument of the event contains the arguments that were
originally supplied to the add_table call. Hence, the name of the stream can,
for example, be accessed with ``description$name``. The ``tpe`` argument of the
event is an enum containing the type of the change that occurred.
If a line that was not previously present in the table has been added,
then the value of ``tpe`` will be ``Input::EVENT_NEW``. In this case ``left``
contains the index of the added table entry and ``right`` contains the
values of the added entry.
If a table entry that already was present is altered during the re-reading or
streaming read of a file, then the value of ``tpe`` will be
``Input::EVENT_CHANGED``. In
this case ``left`` contains the index of the changed table entry and ``right``
contains the values of the entry before the change. The reason for this is
that the table already has been updated when the event is raised. The current
@ -208,8 +225,9 @@ value in the table can be ascertained by looking up the current table value.
Hence it is possible to compare the new and the old values of the table.
If a table element is removed because it was no longer present during a
re-read, then the value of ``tpe`` will be ``Input::EVENT_REMOVED``. In this
case ``left`` contains the index and ``right`` the values of the removed
element.
Filtering data during import
@ -222,24 +240,26 @@ can either accept or veto the change by returning true for an accepted
change and false for a rejected change. Furthermore, it can alter the data
before it is written to the table.
The following example filter will reject adding entries to the table when
they were generated over a month ago. It will accept all changes and all
removals of values that are already present in the table.
.. code:: bro
    Input::add_table([$source="blacklist.file", $name="blacklist",
                      $idx=Idx, $val=Val, $destination=blacklist,
                      $mode=Input::REREAD,
                      $pred(typ: Input::Event, left: Idx, right: Val) = {
                          if ( typ != Input::EVENT_NEW ) {
                              return T;
                          }
                          return (current_time() - right$timestamp) < 30day;
                      }]);
To change elements while they are being imported, the predicate function can
manipulate ``left`` and ``right``. Note that predicate functions are called
before the change is committed to the table. Hence, when a table element is
changed (``typ`` is ``Input::EVENT_CHANGED``), ``left`` and ``right``
contain the new values, but the destination (``blacklist`` in our example)
still contains the old values. This allows predicate functions to examine
the changes between the old and the new version before deciding if they
should be permitted.
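As a sketch, a predicate could also normalize a value in place before it is
committed (``to_lower`` is a built-in string function; the ``reason`` field
is from the example records above):

.. code:: bro

    Input::add_table([$source="blacklist.file", $name="blacklist",
                      $idx=Idx, $val=Val, $destination=blacklist,
                      $pred(typ: Input::Event, left: Idx, right: Val) = {
                          # Normalize the reason before the entry is stored.
                          right$reason = to_lower(right$reason);
                          return T;
                      }]);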
@ -250,14 +270,19 @@ Different readers
The input framework supports different kinds of readers for different kinds
of source data files. At the moment, the default reader reads ASCII files
formatted in the Bro log file format (tab-separated values with a "#fields"
header line). Several other readers are included in Bro.

The raw reader reads a file that is
split by a specified record separator (newline by default). The contents are
returned line-by-line as strings; it can, for example, be used to read
configuration files and the like and is probably
only useful in the event mode and not for reading data to tables.

The binary reader is intended to be used with file analysis input streams (and
is the default type of reader for those streams).

The benchmark reader is being used
to optimize the speed of the input framework. It can generate arbitrary
amounts of semi-random data in all Bro data types supported by the input
framework.
@ -270,75 +295,17 @@ aforementioned ones:
logging-input-sqlite
Add_table options
-----------------
This section lists all possible options that can be used for the add_table
function and gives a short explanation of their use. Most of the options
already have been discussed in the previous sections.
The possible fields that can be set for a table stream are:
``source``
A mandatory string identifying the source of the data.
For the ASCII reader this is the filename.
``name``
A mandatory name for the filter that can later be used
to manipulate it further.
``idx``
Record type that defines the index of the table.
``val``
Record type that defines the values of the table.
``reader``
The reader used for this stream. Default is ``READER_ASCII``.
``mode``
The mode in which the stream is opened. Possible values are
``MANUAL``, ``REREAD`` and ``STREAM``. Default is ``MANUAL``.
``MANUAL`` means that the file is not updated after it has
been read. Changes to the file will not be reflected in the
data Bro knows. ``REREAD`` means that the whole file is read
again each time a change is found. This should be used for
files that are mapped to a table where individual lines can
change. ``STREAM`` means that the data from the file is
streamed. Events / table entries will be generated as new
data is appended to the file.
``destination``
The destination table.
``ev``
Optional event that is raised when values are added to,
changed in, or deleted from the table. Events are passed an
Input::Event description as the first argument, the index
record as the second argument and the values as the third
argument.
``pred``
Optional predicate that can prevent entries from being added
to the table and events from being sent.
``want_record``
Boolean value that defines whether the event wants to receive the
fields inside of a single record value, or individually
(default). This can be used if ``val`` is a record
containing only one type. In this case, if ``want_record`` is
set to false, the table will contain elements of the type
contained in ``val``.
Reading Data to Events
======================
The second supported mode of the input framework is reading data to Bro
events instead of reading them to a table.
Event streams work very similarly to table streams that were already
discussed in much detail. To read the blacklist of the previous example
into an event stream, the :bro:id:`Input::add_event` function is used.
For example:
.. code:: bro
@ -348,12 +315,15 @@ into an event stream, the following Bro code could be used:
        reason: string;
    };

    event blacklistentry(description: Input::EventDescription,
                         t: Input::Event, data: Val) {
        # do something here...
        print "data:", data;
    }

    event bro_init() {
        Input::add_event([$source="blacklist.file", $name="blacklist",
                          $fields=Val, $ev=blacklistentry]);
    }
@ -364,52 +334,3 @@ data types are provided in a single record definition.
Apart from this, event streams work exactly the same as table streams and
support most of the options that are also supported for table streams.
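For instance, if the handler should receive the fields individually rather
than wrapped in a record, the stream can be created with ``$want_record=F``
(a sketch, reusing the ``Val`` record from above):

.. code:: bro

    event blacklistentry2(description: Input::EventDescription,
                          t: Input::Event, ip: addr, timestamp: time,
                          reason: string) {
        print "data:", ip, timestamp, reason;
    }

    event bro_init() {
        Input::add_event([$source="blacklist.file", $name="blacklist2",
                          $fields=Val, $ev=blacklistentry2,
                          $want_record=F]);
    }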
The options that can be set when creating an event stream with
``add_event`` are:
``source``
A mandatory string identifying the source of the data.
For the ASCII reader this is the filename.
``name``
A mandatory name for the stream that can later be used
to remove it.
``fields``
Name of a record type containing the fields, which should be
retrieved from the input stream.
``ev``
The event which is fired, after a line has been read from the
input source. The first argument that is passed to the event
is an Input::Event structure, followed by the data, either
inside of a record (if ``want_record`` is set) or as
individual fields. The Input::Event structure can contain
information on whether the received line is ``NEW``, has been
``CHANGED`` or ``DELETED``. Since the ASCII reader cannot
track this information for event filters, the value is
always ``NEW`` at the moment.
``mode``
The mode in which the stream is opened. Possible values are
``MANUAL``, ``REREAD`` and ``STREAM``. Default is ``MANUAL``.
``MANUAL`` means that the file is not updated after it has
been read. Changes to the file will not be reflected in the
data Bro knows. ``REREAD`` means that the whole file is read
again each time a change is found. This should be used for
files that are mapped to a table where individual lines can
change. ``STREAM`` means that the data from the file is
streamed. Events / table entries will be generated as new
data is appended to the file.
``reader``
The reader used for this stream. Default is ``READER_ASCII``.
``want_record``
Boolean value that defines whether the event wants to receive the
fields inside of a single record value, or individually
(default). If this is set to true, the event will receive a
single record of the type provided in ``fields``.

View file

@ -23,17 +23,18 @@ In contrast to the ASCII reader and writer, the SQLite plugins have not yet
seen extensive use in production environments. While we are not aware
of any issues with them, we urge caution when using them
in production environments. There could be lingering issues which only occur
when the plugins are used with high amounts of data or in high-load
environments.
Logging Data into SQLite Databases
==================================
Logging support for SQLite is available in all Bro installations starting with
version 2.2. There is no need to load any additional scripts or for any
compile-time configurations.
Sending data from existing logging streams to SQLite is rather straightforward.
You have to define a filter which specifies SQLite as the writer.
The following example code adds SQLite as a filter for the connection log:
@ -44,15 +45,15 @@ The following example code adds SQLite as a filter for the connection log:
# Make sure this parses correctly at least.
@TEST-EXEC: bro ${DOC_ROOT}/frameworks/sqlite-conn-filter.bro
Bro will create the database file ``/var/db/conn.sqlite``, if it does not
already exist. It will also create a table with the name ``conn`` (if it
does not exist) and start appending connection information to the table.
At the moment, SQLite databases are not rotated the same way ASCII log-files
are. You have to take care to create them in an adequate location.
If you examine the resulting SQLite database, the schema will contain the
same fields that are present in the ASCII log files::
# sqlite3 /var/db/conn.sqlite
@ -67,35 +68,39 @@ that are present in the ASCII log files::
'id.orig_p' integer,
...
Note that the ASCII ``conn.log`` will still be created. To prevent this file
from being created, you can remove the default filter:
.. code:: bro
    Log::remove_filter(Conn::LOG, "default");
To create a custom SQLite log file, you have to create a new log stream
that contains just the information you want to commit to the database.
Please refer to the :ref:`framework-logging` documentation on how to
create custom log streams.
Reading Data from SQLite Databases
==================================
Like logging support, support for reading data from SQLite databases is
built into Bro starting with version 2.2.
Just as with the text-based input readers (please refer to the
:ref:`framework-input` documentation for them and for basic information
on how to use the input framework), the SQLite reader can be used to
read data - in this case the result of SQL queries - into tables or into
events.
Reading Data into Tables
------------------------
To read data from a SQLite database, we first have to provide Bro with
information about how the resulting data will be structured. For this
example, we expect that we have a SQLite database, which contains
host IP addresses and the user accounts that are allowed to log into
a specific machine.
The SQLite commands to create the schema are as follows::
@ -107,8 +112,8 @@ The SQLite commands to create the schema are as follows::
insert into machines_to_users values ('192.168.17.2', 'bernhard');
insert into machines_to_users values ('192.168.17.3', 'seth,matthias');
After creating a file called ``hosts.sqlite`` with this content, we can
read the resulting table into Bro:
.. btest-include:: ${DOC_ROOT}/frameworks/sqlite-read-table.bro
@ -117,22 +122,25 @@ into Bro:
# Make sure this parses correctly at least.
@TEST-EXEC: bro ${DOC_ROOT}/frameworks/sqlite-read-table.bro
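Since the included script is not reproduced here, the core of it looks
roughly like this (a sketch; the record field names and the ``hostslist``
table name are assumptions):

.. code:: bro

    type Idx: record {
        host: addr;
    };

    type Val: record {
        users: string;
    };

    global hostslist: table[addr] of Val = table();

    event bro_init() {
        Input::add_table([$source="/var/db/hosts",
                          $name="hosts",
                          $idx=Idx, $val=Val,
                          $destination=hostslist,
                          $reader=Input::READER_SQLITE,
                          $config=table(["query"] = "select * from machines_to_users;")]);
        Input::remove("hosts");
    }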
Afterwards, that table can be used to check logins into hosts against
the available userlist.
Turning Data into Events
------------------------
The second mode is to use the SQLite reader to output the input data as events.
Typically there are two reasons to do this. First, when the structure of
the input data is too complicated for a direct table import. In this case,
the data can be read into an event which can then create the necessary
data structures in Bro in scriptland.
The second reason is that the dataset is too big to hold in memory. In
this case, the checks can be performed on-demand, when Bro encounters a
situation where it needs additional information.
An example for this would be a huge internal database with malware
hashes. Live database queries could be used to check the sporadically
happening downloads against the database.
The SQLite commands to create the schema are as follows::
@ -151,9 +159,10 @@ The SQLite commands to create the schema are as follows::
insert into malware_hashes values ('73f45106968ff8dc51fba105fa91306af1ff6666', 'ftp-trace');
The following code uses the file-analysis framework to get the sha1 hashes
of files that are transmitted over the network. For each hash, a SQL query
is run against SQLite. If the query returns with a result, we had a hit
against our malware database and output the matching hash.
.. btest-include:: ${DOC_ROOT}/frameworks/sqlite-read-events.bro
@ -162,5 +171,5 @@ returns with a result, we had a hit against our malware-database and output the
# Make sure this parses correctly at least.
@TEST-EXEC: bro ${DOC_ROOT}/frameworks/sqlite-read-events.bro
If you run this script against the trace in
``testing/btest/Traces/ftp/ipv4.trace``, you will get one hit.

View file

@ -19,195 +19,144 @@ Terminology
Bro's logging interface is built around three main abstractions:
Streams
    A log stream corresponds to a single log. It defines the set of
    fields that a log consists of with their names and types.
    Examples are the ``conn`` stream for recording connection summaries,
    and the ``http`` stream for recording HTTP activity.
Filters
    Each stream has a set of filters attached to it that determine
    what information gets written out. By default, each stream has
    one default filter that just logs everything directly to disk.
    However, additional filters can be added to record only a subset
    of the log records, write to different outputs, or set a custom
    rotation interval. If all filters are removed from a stream,
    then output is disabled for that stream.

Writers
    Each filter has a writer. A writer defines the actual output
    format for the information being logged. The default writer is
    the ASCII writer, which produces tab-separated ASCII files. Other
    writers are available, like for binary output or direct logging
    into a database.
Basics
======
There are several different ways to customize Bro's logging: you can create
a new log stream, you can extend an existing log with new fields, you
can apply filters to an existing log stream, or you can customize the output
format by setting log writer options. All of these approaches are
described in this document.
Streams
=======
In order to log data to a new log stream, all of the following needs to be
done:
- A :bro:type:`record` type must be defined which consists of all the
  fields that will be logged (by convention, the name of this record type is
  usually "Info").
- A log stream ID (an :bro:type:`enum` with type name "Log::ID") must be
  defined that uniquely identifies the new log stream.
- A log stream must be created using the :bro:id:`Log::create_stream` function.
- When the data to be logged becomes available, the :bro:id:`Log::write`
  function must be called.
In the following example, we create a new module "Foo" which creates
a new log stream.
.. code:: bro
    module Foo;

    export {
        # Create an ID for our new stream. By convention, this is
        # called "LOG".
        redef enum Log::ID += { LOG };

        # Define the record type that will contain the data to log.
        type Info: record {
            ts: time &log;
            id: conn_id &log;
            service: string &log &optional;
            missed_bytes: count &log &default=0;
        };
    }

    # Optionally, we can add a new field to the connection record so that
    # the data we are logging (our "Info" record) will be easily
    # accessible in a variety of event handlers.
    redef record connection += {
        # By convention, the name of this new field is the lowercase name
        # of the module.
        foo: Info &optional;
    };

    # This event is handled at a priority higher than zero so that if
    # users modify this stream in another script, they can do so at the
    # default priority of zero.
    event bro_init() &priority=5
        {
        # Create the stream. This adds a default filter automatically.
        Log::create_stream(Foo::LOG, [$columns=Info, $path="foo"]);
        }
In the definition of the "Info" record above, notice that each field has the
:bro:attr:`&log` attribute. Without this attribute, a field will not appear in
the log output. Also notice one field has the :bro:attr:`&optional` attribute.
This indicates that the field might not be assigned any value before the
log record is written. Finally, a field with the :bro:attr:`&default`
attribute has a default value assigned to it automatically.
At this point, the only thing missing is a call to the :bro:id:`Log::write`
function to send data to the logging framework. The actual event handler
where this should take place will depend on where your data becomes available.
In this example, the :bro:id:`connection_established` event provides our data,
and we also store a copy of the data being logged into the
:bro:type:`connection` record:
.. code:: bro
    event connection_established(c: connection)
        {
        local rec: Foo::Info = [$ts=network_time(), $id=c$id];

        # Store a copy of the data in the connection record so other
        # event handlers can access it.
        c$foo = rec;

        Log::write(Foo::LOG, rec);
        }
If you run Bro with this script, a new log file ``foo.log`` will be created.
Although we only specified four fields in the "Info" record above, the
log output will actually contain seven fields because one of the fields
(the one named "id") is itself a record type. Since a :bro:type:`conn_id`
record has four fields, then each of these fields is a separate column in
the log output. Note that the way that such fields are named in the log
output differs slightly from the way we would refer to the same field
in a Bro script (each dollar sign is replaced with a period). For example,
to access the first field of a ``conn_id`` in a Bro script we would use
the notation ``id$orig_h``, but that field is named ``id.orig_h``
in the log output.
When you are developing scripts that add data to the :bro:type:`connection`
record, care must be given to when and how long data is stored.
Normally data saved to the connection record will remain there for the
duration of the connection and from a practical perspective it's not
uncommon to need to delete that data before the end of the connection.
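As a sketch, removing the stored record once it is no longer needed could
look like this:

.. code:: bro

    event connection_state_remove(c: connection)
        {
        if ( c?$foo )
            delete c$foo;
        }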
Add Fields to a Log
-------------------
You can add additional fields to a log by extending the record
type that defines its content, and setting a value for the new fields
before each log record is written.
Let's say we want to add a boolean field ``is_private`` to
:bro:type:`Conn::Info` that indicates whether the originator IP address
is part of the :rfc:`1918` space:
.. code:: bro
@ -218,9 +167,21 @@ originator IP address is part of the :rfc:`1918` space:
is_private: bool &default=F &log;
};
As this example shows, when extending a log stream's "Info" record, each
new field must always be declared either with a ``&default`` value or
as ``&optional``. Furthermore, you need to add the ``&log`` attribute
or otherwise the field won't appear in the log file.
Now we need to set the field. Although the details vary depending on which
log is being extended, in general it is important to choose a suitable event
in which to set the additional fields because we need to make sure that
the fields are set before the log record is written. Sometimes the right
choice is the same event which writes the log record, but at a higher
priority (in order to ensure that the event handler that sets the additional
fields is executed before the event handler that writes the log record).

In this example, since a connection's summary is generated at
the time its state is removed from memory, we can add another handler
.. code:: bro
@ -232,31 +193,58 @@ at that time that sets our field correctly:
}
Now ``conn.log`` will show a new field ``is_private`` of type
``bool``. If you look at the Bro script which defines the connection
log stream :doc:`/scripts/base/protocols/conn/main.bro`, you will see
that ``Log::write`` gets called in an event handler for the
same event as used in this example to set the additional fields, but at a
lower priority than the one used in this example (i.e., the log record gets
written after we assign the ``is_private`` field).
For extending logs this way, one needs a bit of knowledge about how
the script that creates the log stream is organizing its state
keeping. Most of the standard Bro scripts attach their log state to
the :bro:type:`connection` record where it can then be accessed, just
like ``c$conn`` above. For example, the HTTP analysis adds a field
``http`` of type :bro:type:`HTTP::Info` to the :bro:type:`connection`
record.
record.
Define a Logging Event
----------------------
Sometimes it is helpful to do additional analysis of the information
being logged. For these cases, a stream can specify an event that will
be generated every time a log record is written to it. To do this, we
need to modify the example module shown above to look something like this:
.. code:: bro
    module Foo;

    export {
        redef enum Log::ID += { LOG };

        type Info: record {
            ts: time &log;
            id: conn_id &log;
            service: string &log &optional;
            missed_bytes: count &log &default=0;
        };

        # Define a logging event. By convention, this is called
        # "log_<stream>".
        global log_foo: event(rec: Info);
    }

    event bro_init() &priority=5
        {
        # Specify the "log_foo" event here in order for Bro to raise it.
        Log::create_stream(Foo::LOG, [$columns=Info, $ev=log_foo,
                                      $path="foo"]);
        }
All of Bro's default log streams define such an event. For example, the
connection log stream raises the event :bro:id:`Conn::log_conn`. You
could use that for example for flagging when a connection to a
specific destination exceeds a certain duration:
@ -270,7 +258,7 @@ specific destination exceeds a certain duration:
    event Conn::log_conn(rec: Conn::Info)
        {
        if ( rec?$duration && rec$duration > 5mins )
            NOTICE([$note=Long_Conn_Found,
                    $msg=fmt("unusually long conn to %s", rec$id$resp_h),
                    $id=rec$id]);
        }
@ -281,15 +269,196 @@ externally with Perl scripts. Much of what such an external script
would do later offline, one may instead do directly inside of Bro in
real-time.
Disable a Stream
----------------
One way to "turn off" a log is to completely disable the stream. For
example, the following code will prevent the ``conn.log`` from being written:
.. code:: bro
    event bro_init()
        {
        Log::disable_stream(Conn::LOG);
        }
Note that this must run after the stream is created, so the priority
of this event handler must be lower than the priority of the event handler
where the stream was created.
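Since the example streams above are created at priority 5, a handler at a
lower (e.g. negative) priority runs afterwards (a sketch):

.. code:: bro

    event bro_init() &priority=-5
        {
        Log::disable_stream(Conn::LOG);
        }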
Filters
=======
A stream has one or more filters attached to it (a stream without any filters
will not produce any log output). When a stream is created, it automatically
gets a default filter attached to it. This default filter can be removed
or replaced, or other filters can be added to the stream. This is accomplished
by using either the :bro:id:`Log::add_filter` or :bro:id:`Log::remove_filter`
function. This section shows how to use filters to do such tasks as
rename a log file, split the output into multiple files, control which
records are written, and set a custom rotation interval.
Rename Log File
---------------
Normally, the log filename for a given log stream is determined when the
stream is created, unless you explicitly specify a different one by adding
a filter.
The easiest way to change a log filename is to simply replace the
default log filter with a new filter that specifies a value for the "path"
field. In this example, "conn.log" will be changed to "myconn.log":
.. code:: bro
    event bro_init()
        {
        # Replace default filter for the Conn::LOG stream in order to
        # change the log filename.
        local f = Log::get_filter(Conn::LOG, "default");
        f$path = "myconn";
        Log::add_filter(Conn::LOG, f);
        }
Keep in mind that the "path" field of a log filter never contains the
filename extension. The extension will be determined later by the log writer.
Add a New Log File
------------------
Normally, a log stream writes to only one log file. However, you can
add filters so that the stream writes to multiple files. This is useful
if you want to restrict the set of fields being logged to the new file.
In this example, a new filter is added to the Conn::LOG stream that writes
two fields to a new log file:
.. code:: bro
    event bro_init()
        {
        # Add a new filter to the Conn::LOG stream that logs only
        # timestamp and originator address.
        local filter: Log::Filter = [$name="orig-only", $path="origs",
                                     $include=set("ts", "id.orig_h")];
        Log::add_filter(Conn::LOG, filter);
        }
Notice how the "include" filter attribute specifies a set that limits the
fields to the ones given. The names correspond to those in the
:bro:type:`Conn::Info` record (however, because the "id" field is itself a
record, we can specify an individual field of "id" by the dot notation
shown in the example).
Using the code above, in addition to the regular ``conn.log``, you will
now also get a new log file ``origs.log`` that looks like the regular
``conn.log``, but will have only the fields specified in the "include"
filter attribute.
If you want to skip only some fields but keep the rest, there is a
corresponding ``exclude`` filter attribute that you can use instead of
``include`` to list only the ones you are not interested in.
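For instance, a filter that drops just the ``history`` column might look
like this (a sketch):

.. code:: bro

    event bro_init()
        {
        local filter: Log::Filter = [$name="no-history", $path="conn-brief",
                                     $exclude=set("history")];
        Log::add_filter(Conn::LOG, filter);
        }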
If you want to make this the only log file for the stream, you can
remove the default filter:
.. code:: bro
    event bro_init()
        {
        # Remove the filter called "default".
        Log::remove_filter(Conn::LOG, "default");
        }
Determine Log Path Dynamically
------------------------------
Instead of using the "path" filter attribute, a filter can determine
output paths *dynamically* based on the record being logged. That
allows, e.g., to record local and remote connections into separate
files. To do this, you define a function that returns the desired path,
and use the "path_func" filter attribute:
.. code:: bro
    # Note: if using BroControl then you don't need to redef local_nets.
    redef Site::local_nets = { 192.168.0.0/16 };

    function myfunc(id: Log::ID, path: string, rec: Conn::Info) : string
        {
        # Return "conn-local" if originator is a local IP, otherwise
        # return "conn-remote".
        local r = Site::is_local_addr(rec$id$orig_h) ? "local" : "remote";
        return fmt("%s-%s", path, r);
        }

    event bro_init()
        {
        local filter: Log::Filter = [$name="conn-split",
                                     $path_func=myfunc, $include=set("ts", "id.orig_h")];
        Log::add_filter(Conn::LOG, filter);
        }
Running this will now produce two new files, ``conn-local.log`` and
``conn-remote.log``, with the corresponding entries (for this example to work,
the ``Site::local_nets`` must specify your local network). One could extend
this further for example to log information by subnets or even by IP
address. Be careful, however, as it is easy to create many files very
quickly.
The ``myfunc`` function has one drawback: it can be used
only with the :bro:enum:`Conn::LOG` stream as the record type is hardcoded
into its argument list. However, Bro allows to do a more generic
variant:
.. code:: bro
    function myfunc(id: Log::ID, path: string,
                    rec: record { id: conn_id; } ) : string
        {
        local r = Site::is_local_addr(rec$id$orig_h) ? "local" : "remote";
        return fmt("%s-%s", path, r);
        }
This function can be used with all log streams that have records
containing an ``id: conn_id`` field.
Filter Log Records
------------------
We have seen how to customize the columns being logged, but
you can also control which records are written out by providing a
predicate that will be called for each log record:
.. code:: bro
    function http_only(rec: Conn::Info) : bool
        {
        # Record only connections with successfully analyzed HTTP traffic
        return rec?$service && rec$service == "http";
        }

    event bro_init()
        {
        local filter: Log::Filter = [$name="http-only", $path="conn-http",
                                     $pred=http_only];
        Log::add_filter(Conn::LOG, filter);
        }
This will result in a new log file ``conn-http.log`` that contains only
the log records from ``conn.log`` that are analyzed as HTTP traffic.
Rotation
--------
The log rotation interval is globally controllable for all
filters by redefining the :bro:id:`Log::default_rotation_interval` option
(note that when using BroControl, this option is set automatically via
the BroControl configuration).
The interval can also be set for specific :bro:type:`Log::Filter` instances
by setting their ``interv`` field. Here's an example of changing just the
@ -301,90 +470,72 @@ their ``interv`` field. Here's an example of changing just the
    {
    local f = Log::get_filter(Conn::LOG, "default");
    f$interv = 1 min;
    Log::add_filter(Conn::LOG, f);
    }
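Redefining the global default instead is a one-liner (a sketch):

.. code:: bro

    redef Log::default_rotation_interval = 1 hr;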
Writers
=======
Each filter has a writer. If you do not specify a writer when adding a
filter to a stream, then the ASCII writer is the default.
There are two ways to specify a non-default writer. To change the default
writer for all log filters, just redefine the :bro:id:`Log::default_writer`
option. Alternatively, you can specify the writer to use on a per-filter
basis by setting a value for the filter's "writer" field. Consult the
documentation of the writer to use to see if there are other options that are
needed.
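For example, switching every filter to the SQLite writer is a one-line
redefinition (a sketch):

.. code:: bro

    redef Log::default_writer = Log::WRITER_SQLITE;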
ASCII Writer
------------
By default, the ASCII writer outputs log files that begin with several
lines of metadata, followed by the actual log output. The metadata
describes the format of the log file, the "path" of the log (i.e., the log
filename without file extension), and also specifies the time that the log
was created and the time when Bro finished writing to it.
The ASCII writer has a number of options for customizing the format of its
output, see :doc:`/scripts/base/frameworks/logging/writers/ascii.bro`.
If you change the output format options, then be careful to check whether
your postprocessing scripts can still recognize your log files.
Some writer options are global (i.e., they affect all log filters using
that log writer). For example, to change the output format of all ASCII
logs to JSON format:
.. code:: bro
    redef LogAscii::use_json = T;
Some writer options are filter-specific (i.e., they affect only the filters
that explicitly specify the option). For example, to change the output
format of the ``conn.log`` only:
.. code:: bro
    event bro_init()
        {
        local f = Log::get_filter(Conn::LOG, "default");

        # Use tab-separated-value mode
        f$config = table(["tsv"] = "T");

        Log::add_filter(Conn::LOG, f);
        }
Other Writers
-------------
Bro supports the following additional built-in output formats:
.. toctree::
    :maxdepth: 1

    logging-input-sqlite
Additional writers are available as external plugins:
.. toctree::
    :maxdepth: 1

    ../components/bro-plugins/README

View file

@ -88,15 +88,15 @@ directly make modifications to the :bro:see:`Notice::Info` record
given as the argument to the hook.
Here's a simple example which tells Bro to send an email for all notices of
type :bro:see:`SSH::Password_Guessing` if the guesser attempted to log in to
the server at 192.168.56.103:
.. btest-include:: ${DOC_ROOT}/frameworks/notice_ssh_guesser.bro

.. btest:: notice_ssh_guesser.bro

    @TEST-EXEC: btest-rst-cmd bro -C -r ${TRACES}/ssh/sshguess.pcap ${DOC_ROOT}/frameworks/notice_ssh_guesser.bro
    @TEST-EXEC: btest-rst-cmd cat notice.log
.. note::
@ -111,10 +111,9 @@ a hook body to run before default hook bodies might look like this:
.. code:: bro
    hook Notice::policy(n: Notice::Info) &priority=5
        {
        # Insert your code here.
        }
Hooks can also abort later hook bodies with the ``break`` keyword. This
is primarily useful if one wants to completely preempt processing by
lower-priority hook bodies.
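A sketch of such a short-circuiting hook body:

.. code:: bro

    hook Notice::policy(n: Notice::Info) &priority=10
        {
        # Stop any further Notice::policy processing for this noisy type.
        if ( n$note == SSH::Password_Guessing )
            break;
        }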

View file

@ -0,0 +1,10 @@
@load protocols/ssh/detect-bruteforcing
redef SSH::password_guesses_limit=10;
hook Notice::policy(n: Notice::Info)
    {
    if ( n$note == SSH::Password_Guessing && /192\.168\.56\.103/ in n$sub )
        add n$actions[Notice::ACTION_EMAIL];
    }

View file

@ -7,15 +7,18 @@ global mime_to_ext: table[string] of string = {
["text/html"] = "html",
};
event file_sniff(f: fa_file, meta: fa_metadata)
    {
    if ( f$source != "HTTP" )
        return;

    if ( ! meta?$mime_type )
        return;

    if ( meta$mime_type !in mime_to_ext )
        return;

    local fname = fmt("%s-%s.%s", f$source, f$id, mime_to_ext[meta$mime_type]);
    print fmt("Extracting file %s", fname);
    Files::add_analyzer(f, Files::ANALYZER_EXTRACT, [$extract_filename=fname]);
    }

View file

@ -8,10 +8,12 @@ How to Upgrade
If you're doing an upgrade install (rather than a fresh install),
there's two suggested approaches: either install Bro using the same
installation prefix directory as before, or pick a new prefix and copy
local customizations over.

Regardless of which approach you choose, if you are using BroControl, then
before doing the upgrade you should stop all running Bro processes with the
"broctl stop" command. After the upgrade is complete, you will need
to run "broctl deploy".
In the following we summarize general guidelines for upgrading, see
the :ref:`release-notes` for version-specific information.
@ -44,4 +46,4 @@ where Bro was originally installed). Review the files for differences
before copying and make adjustments as necessary (use the new version for
differences that aren't a result of a local change). Of particular note,
the copied version of ``$prefix/etc/broctl.cfg`` is likely to need changes
to the ``SpoolDir`` and ``LogDir`` settings.
to any settings that specify a pathname.

View file

@ -4,7 +4,7 @@
.. _MacPorts: http://www.macports.org
.. _Fink: http://www.finkproject.org
.. _Homebrew: http://brew.sh
.. _bro downloads page: http://bro.org/download/index.html
.. _bro downloads page: https://www.bro.org/download/index.html
.. _installing-bro:
@ -32,22 +32,24 @@ before you begin:
* Libz
* Bash (for BroControl)
* Python (for BroControl)
* C++ Actor Framework (CAF) version 0.14 (http://actor-framework.org)
To build Bro from source, the following additional dependencies are required:
* CMake 2.8 or greater (http://www.cmake.org)
* Make
* C/C++ compiler
* C/C++ compiler with C++11 support (GCC 4.8+ or Clang 3.3+)
* SWIG (http://www.swig.org)
* Bison (GNU Parser Generator)
* Flex (Fast Lexical Analyzer)
* Libpcap headers (http://www.tcpdump.org)
* OpenSSL headers (http://www.openssl.org)
* zlib headers
* Perl
* Python
To install the required dependencies, you can use (when done, make sure
that ``bash`` and ``python`` are in your ``PATH``):
To install CAF, first download the source code of the required version from: https://github.com/actor-framework/actor-framework/releases
To install the required dependencies, you can use:
* RPM/RedHat-based Linux:
@ -68,19 +70,41 @@ that ``bash`` and ``python`` are in your ``PATH``):
.. console::
sudo pkg_add -r bash cmake swig bison python perl
sudo pkg install bash cmake swig bison python py27-sqlite3
Note that in older versions of FreeBSD, you might have to use the
"pkg_add -r" command instead of "pkg install".
For older versions of FreeBSD (especially FreeBSD 9.x), the system compiler
is not new enough to compile Bro. For these systems, you will have to install
a newer compiler using pkg; the ``clang34`` package should work.
You will also have to define several environment variables on these older
systems to use the new compiler and headers, similar to the following,
before calling configure:
.. console::
export CC=clang34
export CXX=clang++34
export CXXFLAGS="-stdlib=libc++ -I${LOCALBASE}/include/c++/v1 -L${LOCALBASE}/lib"
export LDFLAGS="-pthread"
* Mac OS X:
Compiling source code on Macs requires first downloading Xcode_,
then going through its "Preferences..." -> "Downloads" menus to
install the "Command Line Tools" component.
Compiling source code on Macs requires first installing Xcode_ (in older
versions of Xcode, you would then need to go through its
"Preferences..." -> "Downloads" menus to install the "Command Line Tools"
component).
OS X comes with all required dependencies except for CMake_ and SWIG_.
Distributions of these dependencies can likely be obtained from your
preferred Mac OS X package management system (e.g. MacPorts_, Fink_,
or Homebrew_). Specifically for MacPorts, the ``cmake``, ``swig``,
and ``swig-python`` packages provide the required dependencies.
OS X comes with all required dependencies except for CMake_, SWIG_,
OpenSSL, and CAF. (OpenSSL ships with OS X versions 10.10 and older,
where it does not need to be installed manually; it was removed in
OS X 10.11.) Distributions of these dependencies can
likely be obtained from your preferred Mac OS X package management
system (e.g. Homebrew_, MacPorts_, or Fink_). Specifically for
Homebrew, the ``cmake``, ``swig``, ``openssl`` and ``caf`` packages
provide the required dependencies.
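For example, with Homebrew the whole set can be installed in one step
(a sketch using the package names listed above):
.. console::
   brew install cmake swig openssl caf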
Optional Dependencies
@ -93,8 +117,9 @@ build time:
* sendmail (enables Bro and BroControl to send mail)
* curl (used by a Bro script that implements active HTTP)
* gperftools (tcmalloc is used to improve memory and CPU usage)
* jemalloc (http://www.canonware.com/jemalloc/)
* PF_RING (Linux only, see :doc:`Cluster Configuration <../configuration/index>`)
* ipsumdump (for trace-summary; http://www.cs.ucla.edu/~kohler/ipsumdump)
* Ruby executable, library, and headers (for Broccoli Ruby bindings)
LibGeoIP is probably the most interesting and can be installed
on most platforms by following the instructions for :ref:`installing
@ -110,40 +135,30 @@ code forms.
Using Pre-Built Binary Release Packages
=======================================
---------------------------------------
See the `bro downloads page`_ for currently supported/targeted
platforms for binary releases.
platforms for binary releases and for installation instructions.
* RPM
* Linux Packages
.. console::
sudo yum localinstall Bro-*.rpm
* DEB
.. console::
sudo gdebi Bro-*.deb
* MacOS Disk Image with Installer
Just open the ``Bro-*.dmg`` and then run the ``.pkg`` installer.
Everything installed by the package will go into ``/opt/bro``.
Linux-based binary installations are usually performed by adding
information about the Bro packages to the respective system packaging
tool. Then the usual system utilities such as ``apt``, ``dnf``, ``yum``,
or ``zypper`` are used to perform the installation.
The primary install prefix for binary packages is ``/opt/bro``.
Non-MacOS packages that include BroControl also put variable/runtime
data (e.g. Bro logs) in ``/var/opt/bro``.
Installing from Source
==========================
----------------------
Bro releases are bundled into source packages for convenience and are
available on the `bro downloads page`_. Alternatively, the latest
Bro development version can be obtained through git repositories
available on the `bro downloads page`_.
Alternatively, the latest Bro development version
can be obtained through git repositories
hosted at ``git.bro.org``. See our `git development documentation
<http://bro.org/development/howtos/process.html>`_ for comprehensive
<https://www.bro.org/development/howtos/process.html>`_ for comprehensive
information on Bro's use of git revision control, but the short story
for downloading the full source code experience for Bro via git is:
@ -164,13 +179,23 @@ run ``./configure --help``):
make
make install
If the ``configure`` script fails, then it is most likely because it either
couldn't find a required dependency or it couldn't find a sufficiently new
version of a dependency. Assuming that you already installed all required
dependencies, then you may need to use one of the ``--with-*`` options
that can be given to the ``configure`` script to help it locate a dependency.
The default installation path is ``/usr/local/bro``, which would typically
require root privileges when doing the ``make install``. A different
installation path can be chosen by specifying the ``--prefix`` option.
Note that ``/usr`` and ``/opt/bro`` are the
installation path can be chosen by specifying the ``configure`` script
``--prefix`` option. Note that ``/usr`` and ``/opt/bro`` are the
standard prefixes for binary Bro packages to be installed, so those are
typically not good choices unless you are creating such a package.
OpenBSD users, please see our `FAQ
<https://www.bro.org/documentation/faq.html>`_ if you are having
problems installing Bro.
Depending on the Bro package you downloaded, there may be auxiliary
tools and libraries available in the ``aux/`` directory. Some of them
will be automatically built and installed along with Bro. There are
@ -179,10 +204,6 @@ turn off unwanted auxiliary projects that would otherwise be installed
automatically. Finally, use ``make install-aux`` to install some of
the other programs that are in the ``aux/bro-aux`` directory.
OpenBSD users, please see our `FAQ
<//www.bro.org/documentation/faq.html>`_ if you are having
problems installing Bro.
Finally, if you want to build the Bro documentation (not required, because
all of the documentation for the latest Bro release is available on the
Bro web site), there are instructions in ``doc/README`` in the source
@ -191,7 +212,7 @@ distribution.
Configure the Run-Time Environment
==================================
Just remember that you may need to adjust your ``PATH`` environment variable
You may want to adjust your ``PATH`` environment variable
according to the platform/shell/package you're using. For example:
Bourne-Shell Syntax:

View file

@ -30,7 +30,7 @@ export {
event bro_init() &priority=3
{
Log::create_stream(MimeMetrics::LOG, [$columns=Info]);
Log::create_stream(MimeMetrics::LOG, [$columns=Info, $path="mime_metrics"]);
local r1: SumStats::Reducer = [$stream="mime.bytes",
$apply=set(SumStats::SUM)];
local r2: SumStats::Reducer = [$stream="mime.hits",

View file

@ -0,0 +1,24 @@
@load protocols/ssl/expiring-certs
const watched_servers: set[addr] = {
87.98.220.10,
} &redef;
# Site::local_nets usually isn't something you need to modify if
# BroControl automatically sets it up from networks.cfg. It's
# shown here for completeness.
redef Site::local_nets += {
87.98.0.0/16,
};
hook Notice::policy(n: Notice::Info)
{
if ( n$note != SSL::Certificate_Expired )
return;
if ( n$id$resp_h !in watched_servers )
return;
add n$actions[Notice::ACTION_EMAIL];
}

View file

@ -24,9 +24,10 @@ Managing Bro with BroControl
BroControl is an interactive shell for easily operating/managing Bro
installations on a single system or even across multiple systems in a
traffic-monitoring cluster. This section explains how to use BroControl
to manage a stand-alone Bro installation. For instructions on how to
configure a Bro cluster, see the :doc:`Cluster Configuration
<../configuration/index>` documentation.
to manage a stand-alone Bro installation. For a complete reference on
BroControl, see the :doc:`BroControl <../components/broctl/README>`
documentation. For instructions on how to configure a Bro cluster,
see the :doc:`Cluster Configuration <../configuration/index>` documentation.
A Minimal Starting Configuration
--------------------------------
@ -156,9 +157,11 @@ changes we want to make:
notice that means an SSL connection was established and the server's
certificate couldn't be validated using Bro's default trust roots, but
we want to ignore it.
2) ``SSH::Login`` is a notice type that is triggered when an SSH connection
attempt looks like it may have been successful, and we want email when
that happens, but only for certain servers.
2) ``SSL::Certificate_Expired`` is a notice type that is triggered when
an SSL connection was established using an expired certificate. We
want email when that happens, but only for certain servers on the
local network (Bro can also proactively monitor for certs that will
soon expire, but this is just for demonstration purposes).
We've defined *what* we want to do, but need to know *where* to do it.
The answer is to use a script written in the Bro programming language, so
@ -203,7 +206,7 @@ the variable's value may not change at run-time, but whose initial value can be
modified via the ``redef`` operator at parse-time.
Let's continue on our path to modify the behavior for the two SSL
and SSH notices. Looking at :doc:`/scripts/base/frameworks/notice/main.bro`,
notices. Looking at :doc:`/scripts/base/frameworks/notice/main.bro`,
we see that it advertises:
.. code:: bro
@ -216,7 +219,7 @@ we see that it advertises:
const ignored_types: set[Notice::Type] = {} &redef;
}
That's exactly what we want to do for the SSL notice. Add to ``local.bro``:
That's exactly what we want to do for the first notice. Add to ``local.bro``:
.. code:: bro
@ -248,38 +251,30 @@ is valid before installing it and then restarting the Bro instance:
stopping bro ...
starting bro ...
Now that the SSL notice is ignored, let's look at how to send an email on
the SSH notice. The notice framework has a similar option called
``emailed_types``, but using that would generate email for all SSH servers and
we only want email for logins to certain ones. There is a ``policy`` hook
that is actually what is used to implement the simple functionality of
``ignored_types`` and
``emailed_types``, but it's extensible such that the condition and action taken
on notices can be user-defined.
Now that the SSL notice is ignored, let's look at how to send an email
on the other notice. The notice framework has a similar option called
``emailed_types``, but using that would generate email for all SSL
servers with expired certificates and we only want email for connections
to certain ones. There is a ``policy`` hook that is actually what is
used to implement the simple functionality of ``ignored_types`` and
``emailed_types``, but it's extensible such that the condition and
action taken on notices can be user-defined.
In ``local.bro``, let's define a new ``policy`` hook handler body
that takes the email action for SSH logins only for a defined set of servers:
In ``local.bro``, let's define a new ``policy`` hook handler body:
.. code:: bro
.. btest-include:: ${DOC_ROOT}/quickstart/conditional-notice.bro
const watched_servers: set[addr] = {
192.168.1.100,
192.168.1.101,
192.168.1.102,
} &redef;
.. btest:: conditional-notice
hook Notice::policy(n: Notice::Info)
{
if ( n$note == SSH::SUCCESSFUL_LOGIN && n$id$resp_h in watched_servers )
add n$actions[Notice::ACTION_EMAIL];
}
@TEST-EXEC: btest-rst-cmd bro -r ${TRACES}/tls/tls-expired-cert.trace ${DOC_ROOT}/quickstart/conditional-notice.bro
@TEST-EXEC: btest-rst-cmd cat notice.log
You'll just have to trust the syntax for now, but what we've done is
first declare our own variable to hold a set of watched addresses,
``watched_servers``; then added a hook handler body to the policy that will
generate an email whenever the notice type is an SSH login and the responding
host stored
inside the ``Info`` record's connection field is in the set of watched servers.
``watched_servers``; then added a hook handler body to the policy that
will generate an email whenever the notice type is an SSL expired
certificate and the responding host stored inside the ``Info`` record's
connection field is in the set of watched servers.
.. note:: Record field member access is done with the '$' character
instead of a '.' as might be expected from other languages, in

View file

@ -43,8 +43,6 @@ The Bro scripting language supports the following attributes.
+-----------------------------+-----------------------------------------------+
| :bro:attr:`&mergeable` |Prefer set union for synchronized state. |
+-----------------------------+-----------------------------------------------+
| :bro:attr:`&group` |Group event handlers to activate/deactivate. |
+-----------------------------+-----------------------------------------------+
| :bro:attr:`&error_handler` |Used internally for reporter framework events. |
+-----------------------------+-----------------------------------------------+
| :bro:attr:`&type_column` |Used by input framework for "port" type. |
@ -56,13 +54,16 @@ Here is a more detailed explanation of each attribute:
.. bro:attr:: &redef
Allows for redefinition of initial values of global objects declared as
constant.
In this example, the constant (assuming it is global) can be redefined
with a :bro:keyword:`redef` at some later point::
Allows use of a :bro:keyword:`redef` to redefine initial values of
global variables (i.e., variables declared either :bro:keyword:`global`
or :bro:keyword:`const`). Example::
const clever = T &redef;
global cache_size = 256 &redef;
Note that a variable declared "global" can also have its value changed
with assignment statements (regardless of whether it has the "&redef"
attribute).
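For example, a script loaded later could adjust the initial value of the
variable above at parse time (a minimal sketch):
.. code:: bro
    redef cache_size = 512;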
.. bro:attr:: &priority
@ -175,14 +176,20 @@ Here is a more detailed explanation of each attribute:
Rotates a file after a specified interval.
Note: This attribute is deprecated and will be removed in a future release.
.. bro:attr:: &rotate_size
Rotates a file after it has reached a given size in bytes.
Note: This attribute is deprecated and will be removed in a future release.
.. bro:attr:: &encrypt
Encrypts files right before writing them to disk.
Note: This attribute is deprecated and will be removed in a future release.
.. bro:attr:: &raw_output
Opens a file in raw mode, i.e., non-ASCII characters are not
@ -198,11 +205,6 @@ Here is a more detailed explanation of each attribute:
inconsistencies and can be avoided by unifying the two sets, rather
than merely overwriting the old value.
.. bro:attr:: &group
Groups event handlers such that those in the same group can be
jointly activated or deactivated.
.. bro:attr:: &error_handler
Internally set on the events that are associated with the reporter
@ -236,5 +238,4 @@ Here is a more detailed explanation of each attribute:
The associated identifier is marked as deprecated and will be
removed in a future version of Bro. Look in the NEWS file for more
explanation and/or instructions to migrate code that uses deprecated
functionality.
instructions to migrate code that uses deprecated functionality.

View file

@ -58,6 +58,23 @@ executed. Directives are evaluated before script execution begins.
for that script are ignored).
.. bro:keyword:: @load-plugin
Activate a dynamic plugin with the specified plugin name. The specified
plugin must be located in Bro's plugin search path. Example::
@load-plugin Demo::Rot13
By default, Bro will automatically activate all dynamic plugins found
in the plugin search path (the search path can be changed by setting
the environment variable BRO_PLUGIN_PATH to a colon-separated list of
directories). However, in bare mode ("bro -b"), dynamic plugins can be
activated only by using "@load-plugin", or by specifying the full
plugin name on the Bro command-line (e.g., "bro Demo::Rot13"), or by
setting the environment variable BRO_PLUGIN_ACTIVATE to a
comma-separated list of plugin names.
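For instance, in bare mode either of the following activates the example
plugin from above (a sketch; the plugin name is illustrative):
.. console::
   bro -b Demo::Rot13
   BRO_PLUGIN_ACTIVATE=Demo::Rot13 bro -b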
.. bro:keyword:: @load-sigs
This works similarly to "@load", except that in this case the filename

View file

@ -26,13 +26,21 @@ Network Protocols
+----------------------------+---------------------------------------+---------------------------------+
| irc.log | IRC commands and responses | :bro:type:`IRC::Info` |
+----------------------------+---------------------------------------+---------------------------------+
| kerberos.log | Kerberos | :bro:type:`KRB::Info` |
+----------------------------+---------------------------------------+---------------------------------+
| modbus.log | Modbus commands and responses | :bro:type:`Modbus::Info` |
+----------------------------+---------------------------------------+---------------------------------+
| modbus_register_change.log | Tracks changes to Modbus holding | :bro:type:`Modbus::MemmapInfo` |
| | registers | |
+----------------------------+---------------------------------------+---------------------------------+
| mysql.log | MySQL | :bro:type:`MySQL::Info` |
+----------------------------+---------------------------------------+---------------------------------+
| radius.log | RADIUS authentication attempts | :bro:type:`RADIUS::Info` |
+----------------------------+---------------------------------------+---------------------------------+
| rdp.log | RDP | :bro:type:`RDP::Info` |
+----------------------------+---------------------------------------+---------------------------------+
| sip.log | SIP | :bro:type:`SIP::Info` |
+----------------------------+---------------------------------------+---------------------------------+
| smtp.log | SMTP transactions | :bro:type:`SMTP::Info` |
+----------------------------+---------------------------------------+---------------------------------+
| snmp.log | SNMP messages | :bro:type:`SNMP::Info` |
@ -56,6 +64,8 @@ Files
+============================+=======================================+=================================+
| files.log | File analysis results | :bro:type:`Files::Info` |
+----------------------------+---------------------------------------+---------------------------------+
| pe.log | Portable Executable (PE) | :bro:type:`PE::Info` |
+----------------------------+---------------------------------------+---------------------------------+
| x509.log | X.509 certificate info | :bro:type:`X509::Info` |
+----------------------------+---------------------------------------+---------------------------------+

View file

@ -71,9 +71,11 @@ Statements
Declarations
------------
The following global declarations cannot occur within a function, hook, or
event handler. Also, these declarations cannot appear after any statements
that are outside of a function, hook, or event handler.
Declarations cannot occur within a function, hook, or event handler.
Declarations must appear before any statements (except those statements
that are in a function, hook, or event handler) in the concatenation of
all loaded Bro scripts.
.. bro:keyword:: module
@ -126,9 +128,12 @@ that are outside of a function, hook, or event handler.
.. bro:keyword:: global
Variables declared with the "global" keyword will be global.
If a type is not specified, then an initializer is required so that
the type can be inferred. Likewise, if an initializer is not supplied,
then the type must be specified. Example::
then the type must be specified. In some cases, when the type cannot
be correctly inferred, the type must be specified even when an
initializer is present. Example::
global pi = 3.14;
global hosts: set[addr];
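A minimal sketch of the latter case: an empty constructor by itself does
not reveal the index or yield types, so they must be spelled out even
though an initializer is present:
.. code:: bro
    global seen: table[addr] of count = table();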
@ -136,10 +141,11 @@ that are outside of a function, hook, or event handler.
Variable declarations outside of any function, hook, or event handler are
required to use this keyword (unless they are declared with the
:bro:keyword:`const` keyword). Definitions of functions, hooks, and
event handlers are not allowed to use the "global"
keyword (they already have global scope), except function declarations
where no function body is supplied use the "global" keyword.
:bro:keyword:`const` keyword instead).
Definitions of functions, hooks, and event handlers are not allowed
to use the "global" keyword. However, function declarations (i.e., no
function body is provided) can use the "global" keyword.
The scope of a global variable begins where the declaration is located,
and extends through all remaining Bro scripts that are loaded (however,
@ -150,18 +156,22 @@ that are outside of a function, hook, or event handler.
.. bro:keyword:: const
A variable declared with the "const" keyword will be constant.
Variables declared as constant are required to be initialized at the
time of declaration. Example::
time of declaration. Normally, the type is inferred from the initializer,
but the type can be explicitly specified. Example::
const pi = 3.14;
const ssh_port: port = 22/tcp;
The value of a constant cannot be changed later (the only
exception is if the variable is global and has the :bro:attr:`&redef`
attribute, then its value can be changed only with a :bro:keyword:`redef`).
The value of a constant cannot be changed. The only exception is if the
variable is a global constant and has the :bro:attr:`&redef`
attribute, but even then its value can be changed only with a
:bro:keyword:`redef`.
The scope of a constant is local if the declaration is in a
function, hook, or event handler, and global otherwise.
Note that the "const" keyword cannot be used with either the "local"
or "global" keywords (i.e., "const" replaces "local" and "global").
@ -184,7 +194,8 @@ that are outside of a function, hook, or event handler.
.. bro:keyword:: redef
There are three ways that "redef" can be used: to change the value of
a global variable, to extend a record type or enum type, or to specify
a global variable (but only if it has the :bro:attr:`&redef` attribute),
to extend a record type or enum type, or to specify
a new event handler body that replaces all those that were previously
defined.
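For example, the first two forms look like this (a minimal sketch; the
added record field is purely illustrative):
.. code:: bro
    # Change the value of a global that carries the &redef attribute.
    redef SSH::password_guesses_limit = 10;
    # Extend a record type; new fields must be &optional or have a &default.
    redef record connection += {
        note_tag: string &optional;
    };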
@ -237,13 +248,14 @@ that are outside of a function, hook, or event handler.
Statements
----------
Statements (except those contained within a function, hook, or event
handler) can appear only after all global declarations in the concatenation
of all loaded Bro scripts.
Each statement in a Bro script must be terminated with a semicolon (with a
few exceptions noted below). An individual statement can span multiple
lines.
All statements (except those contained within a function, hook, or event
handler) must appear after all global declarations.
Here are the statements that the Bro scripting language supports.
.. bro:keyword:: add
@ -258,8 +270,8 @@ Here are the statements that the Bro scripting language supports.
.. bro:keyword:: break
The "break" statement is used to break out of a :bro:keyword:`switch` or
:bro:keyword:`for` statement.
The "break" statement is used to break out of a :bro:keyword:`switch`,
:bro:keyword:`for`, or :bro:keyword:`while` statement.
.. bro:keyword:: delete
@ -294,7 +306,10 @@ Here are the statements that the Bro scripting language supports.
.. bro:keyword:: for
A "for" loop iterates over each element in a string, set, vector, or
table and executes a statement for each iteration.
table and executes a statement for each iteration. Currently,
modifying a container's membership while iterating over it may
result in undefined behavior, so avoid adding or removing elements
inside the loop.
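One safe pattern is to record the affected elements during the loop and
modify the container afterwards (a minimal sketch with illustrative names):
.. code:: bro
    local to_del: set[string];
    for ( k in mytable )
        {
        if ( mytable[k] == 0 )
            add to_del[k];
        }
    for ( k in to_del )
        delete mytable[k];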
For each iteration of the loop, a loop variable will be assigned to an
element if the expression evaluates to a string or set, or an index if
@ -376,10 +391,10 @@ Here are the statements that the Bro scripting language supports.
.. bro:keyword:: next
The "next" statement can only appear within a :bro:keyword:`for` loop.
It causes execution to skip to the next iteration.
The "next" statement can only appear within a :bro:keyword:`for` or
:bro:keyword:`while` loop. It causes execution to skip to the next
iteration.
For an example, see the :bro:keyword:`for` statement.
.. bro:keyword:: print
@ -568,7 +583,7 @@ Here are the statements that the Bro scripting language supports.
.. bro:keyword:: while
A "while" loop iterates over a body statement as long a given
A "while" loop iterates over a body statement as long as a given
condition remains true.
A :bro:keyword:`break` statement can be used at any time to immediately
@ -606,8 +621,8 @@ Here are the statements that the Bro scripting language supports.
(outside of the braces) of a compound statement.
A compound statement is required in order to execute more than one
statement in the body of a :bro:keyword:`for`, :bro:keyword:`if`, or
:bro:keyword:`when` statement.
statement in the body of a :bro:keyword:`for`, :bro:keyword:`while`,
:bro:keyword:`if`, or :bro:keyword:`when` statement.
Example::

View file

@ -340,15 +340,18 @@ Here is a more detailed description of each type:
table [ type^+ ] of type
where *type^+* is one or more types, separated by commas.
For example:
where *type^+* is one or more types, separated by commas. The
index type cannot be any of the following types: pattern, table, set,
vector, file, opaque, any.
Here is an example of declaring a table indexed by "count" values
and yielding "string" values:
.. code:: bro
global a: table[count] of string;
declares a table indexed by "count" values and yielding
"string" values. The yield type can also be more complex:
The yield type can also be more complex:
.. code:: bro
@ -441,7 +444,9 @@ Here is a more detailed description of each type:
set [ type^+ ]
where *type^+* is one or more types separated by commas.
where *type^+* is one or more types separated by commas. The
index type cannot be any of the following types: pattern, table, set,
vector, file, opaque, any.
Sets can be initialized by listing elements enclosed by curly braces:
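.. code:: bro
    # An illustrative sketch; the ports are arbitrary.
    global ssh_ports: set[port] = { 22/tcp, 2222/tcp };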

View file

@ -4,7 +4,7 @@ type Service: record {
rfc: count;
};
function print_service(serv: Service): string
function print_service(serv: Service)
{
print fmt("Service: %s(RFC%d)",serv$name, serv$rfc);

View file

@ -9,7 +9,7 @@ type System: record {
services: set[Service];
};
function print_service(serv: Service): string
function print_service(serv: Service)
{
print fmt(" Service: %s(RFC%d)",serv$name, serv$rfc);
@ -17,7 +17,7 @@ function print_service(serv: Service): string
print fmt(" port: %s", p);
}
function print_system(sys: System): string
function print_system(sys: System)
{
print fmt("System: %s", sys$name);

View file

@ -23,7 +23,7 @@ function factorial(n: count): count
event bro_init()
{
# Create the logging stream.
Log::create_stream(LOG, [$columns=Info]);
Log::create_stream(LOG, [$columns=Info, $path="factor"]);
}
event bro_done()

View file

@ -37,7 +37,7 @@ function mod5(id: Log::ID, path: string, rec: Factor::Info) : string
event bro_init()
{
Log::create_stream(LOG, [$columns=Info]);
Log::create_stream(LOG, [$columns=Info, $path="factor"]);
local filter: Log::Filter = [$name="split-mod5s", $path_func=mod5];
Log::add_filter(Factor::LOG, filter);

View file

@ -22,7 +22,7 @@ function factorial(n: count): count
event bro_init()
{
Log::create_stream(LOG, [$columns=Info, $ev=log_factor]);
Log::create_stream(LOG, [$columns=Info, $ev=log_factor, $path="factor"]);
}
event bro_done()

View file

@ -363,7 +363,7 @@ decrypted from HTTP streams is stored in
excerpt from :doc:`/scripts/base/protocols/http/main.bro` below.
.. btest-include:: ${BRO_SRC_ROOT}/scripts/base/protocols/http/main.bro
:lines: 9-11,20-22,121
:lines: 9-11,20-22,125
Because the constant was declared with the ``&redef`` attribute, if we
needed to turn this option on globally, we could do so by adding the
@ -826,7 +826,7 @@ example of the ``record`` data type in the earlier sections, the
``conn.log``, is shown by the excerpt below.
.. btest-include:: ${BRO_SRC_ROOT}/scripts/base/protocols/conn/main.bro
:lines: 10-12,16-17,19,21,23,25,28,31,35,38,57,63,69,92,95,99,102,106,110-111,116
:lines: 10-12,16-17,19,21,23,25,28,31,35,38,57,63,69,75,98,101,105,108,112,116-117,122
Looking at the structure of the definition, a new collection of data
types is being defined as a type called ``Info``. Since this type

View file

@ -51,12 +51,6 @@ add given prefix to policy file resolution
\fB\-r\fR,\ \-\-readfile <readfile>
read from given tcpdump file
.TP
\fB\-y\fR,\ \-\-flowfile <file>[=<ident>]
read from given flow file
.TP
\fB\-Y\fR,\ \-\-netflow <ip>:<prt>[=<id>]
read flow from socket
.TP
\fB\-s\fR,\ \-\-rulefile <rulefile>
read rules from given file
.TP
@ -78,27 +72,21 @@ run the specified policy file analysis
\fB\-C\fR,\ \-\-no\-checksums
ignore checksums
.TP
\fB\-D\fR,\ \-\-dfa\-size <size>
DFA state cache size
.TP
\fB\-F\fR,\ \-\-force\-dns
force DNS
.TP
\fB\-I\fR,\ \-\-print\-id <ID name>
print out given ID
.TP
\fB\-J\fR,\ \-\-set\-seed <seed>
set the random number seed
.TP
\fB\-K\fR,\ \-\-md5\-hashkey <hashkey>
set key for MD5\-keyed hashing
.TP
\fB\-L\fR,\ \-\-rule\-benchmark
benchmark for rules
.TP
\fB\-N\fR,\ \-\-print\-plugins
print available plugins and exit (\fB\-NN\fR for verbose)
.TP
\fB\-O\fR,\ \-\-optimize
optimize policy script
.TP
\fB\-P\fR,\ \-\-prime\-dns
prime DNS
.TP
@ -120,7 +108,7 @@ Record process status in file
\fB\-W\fR,\ \-\-watchdog
activate watchdog timer
.TP
\fB\-X\fR,\ \-\-broxygen
\fB\-X\fR,\ \-\-broxygen <cfgfile>
generate documentation based on config file
.TP
\fB\-\-pseudo\-realtime[=\fR<speedup>]
@ -131,6 +119,19 @@ load seeds from given file
.TP
\fB\-\-save\-seeds\fR <file>
save seeds to given file
.TP
The following option is available only when Bro is built with the \-\-enable\-debug configure option:
.TP
\fB\-B\fR,\ \-\-debug <dbgstreams>
Enable debugging output for selected streams ('-B help' for help)
.TP
The following options are available only when Bro is built with gperftools support (use the \-\-enable\-perftools and \-\-enable\-perftools\-debug configure options):
.TP
\fB\-m\fR,\ \-\-mem-leaks
show leaks
.TP
\fB\-M\fR,\ \-\-mem-profile
record heap
.SH ENVIRONMENT
.TP
.B BROPATH

View file

@ -1,14 +0,0 @@
#!/bin/sh
# CMake/CPack versions before 2.8.3 have bugs that can create bad packages
# Since packages will be built on several different systems, a single
# version of CMake is required to obtain consistency, but can be increased
# as new versions of CMake come out that also produce working packages.
CMAKE_PACK_REQ="cmake version 2.8.6"
CMAKE_VER=`cmake -version`
if [ "${CMAKE_VER}" != "${CMAKE_PACK_REQ}" ]; then
echo "Package creation requires ${CMAKE_PACK_REQ}" >&2
exit 1
fi

View file

@ -3,8 +3,6 @@
# This script generates binary DEB packages.
# They can be found in ../build/ after running.
./check-cmake || { exit 1; }
# The DEB CPack generator depends on `dpkg-shlibdeps` to automatically
# determine what dependencies to set for the packages
type dpkg-shlibdeps > /dev/null 2>&1 || {

View file

@ -3,14 +3,6 @@
# This script creates binary packages for Mac OS X.
# They can be found in ../build/ after running.
cmake -P /dev/stdin << "EOF"
if ( ${CMAKE_VERSION} VERSION_LESS 2.8.9 )
message(FATAL_ERROR "CMake >= 2.8.9 required to build package")
endif ()
EOF
[ $? -ne 0 ] && exit 1;
type sw_vers > /dev/null 2>&1 || {
echo "Unable to get Mac OS X version" >&2;
exit 1;

View file

@ -3,8 +3,6 @@
# This script generates binary RPM packages.
# They can be found in ../build/ after running.
./check-cmake || { exit 1; }
# The RPM CPack generator depends on `rpmbuild` to create packages
type rpmbuild > /dev/null 2>&1 || {
echo "\

View file

@ -53,7 +53,8 @@ function set_limit(f: fa_file, args: Files::AnalyzerArgs, n: count): bool
function on_add(f: fa_file, args: Files::AnalyzerArgs)
{
if ( ! args?$extract_filename )
args$extract_filename = cat("extract-", f$source, "-", f$id);
args$extract_filename = cat("extract-", f$last_active, "-", f$source,
"-", f$id);
f$info$extracted = args$extract_filename;
args$extract_filename = build_path_compressed(prefix, args$extract_filename);

View file

@ -0,0 +1 @@
Support for Portable Executable (PE) file analysis.

View file

@ -0,0 +1,2 @@
@load ./consts
@load ./main

View file

@ -0,0 +1,184 @@
module PE;
export {
const machine_types: table[count] of string = {
[0x00] = "UNKNOWN",
[0x1d3] = "AM33",
[0x8664] = "AMD64",
[0x1c0] = "ARM",
[0x1c4] = "ARMNT",
[0xaa64] = "ARM64",
[0xebc] = "EBC",
[0x14c] = "I386",
[0x200] = "IA64",
[0x9041] = "M32R",
[0x266] = "MIPS16",
[0x366] = "MIPSFPU",
[0x466] = "MIPSFPU16",
[0x1f0] = "POWERPC",
[0x1f1] = "POWERPCFP",
[0x166] = "R4000",
[0x1a2] = "SH3",
[0x1a3] = "SH3DSP",
[0x1a6] = "SH4",
[0x1a8] = "SH5",
[0x1c2] = "THUMB",
[0x169] = "WCEMIPSV2"
} &default=function(i: count):string { return fmt("unknown-%d", i); };
const file_characteristics: table[count] of string = {
[0x1] = "RELOCS_STRIPPED",
[0x2] = "EXECUTABLE_IMAGE",
[0x4] = "LINE_NUMS_STRIPPED",
[0x8] = "LOCAL_SYMS_STRIPPED",
[0x10] = "AGGRESSIVE_WS_TRIM",
[0x20] = "LARGE_ADDRESS_AWARE",
[0x80] = "BYTES_REVERSED_LO",
[0x100] = "32BIT_MACHINE",
[0x200] = "DEBUG_STRIPPED",
[0x400] = "REMOVABLE_RUN_FROM_SWAP",
[0x800] = "NET_RUN_FROM_SWAP",
[0x1000] = "SYSTEM",
[0x2000] = "DLL",
[0x4000] = "UP_SYSTEM_ONLY",
[0x8000] = "BYTES_REVERSED_HI"
} &default=function(i: count):string { return fmt("unknown-%d", i); };
const dll_characteristics: table[count] of string = {
[0x40] = "DYNAMIC_BASE",
[0x80] = "FORCE_INTEGRITY",
[0x100] = "NX_COMPAT",
[0x200] = "NO_ISOLATION",
[0x400] = "NO_SEH",
[0x800] = "NO_BIND",
[0x2000] = "WDM_DRIVER",
[0x8000] = "TERMINAL_SERVER_AWARE"
} &default=function(i: count):string { return fmt("unknown-%d", i); };
const windows_subsystems: table[count] of string = {
[0] = "UNKNOWN",
[1] = "NATIVE",
[2] = "WINDOWS_GUI",
[3] = "WINDOWS_CUI",
[7] = "POSIX_CUI",
[9] = "WINDOWS_CE_GUI",
[10] = "EFI_APPLICATION",
[11] = "EFI_BOOT_SERVICE_DRIVER",
[12] = "EFI_RUNTIME_DRIVER",
[13] = "EFI_ROM",
[14] = "XBOX"
} &default=function(i: count):string { return fmt("unknown-%d", i); };
const directories: table[count] of string = {
[0] = "Export Table",
[1] = "Import Table",
[2] = "Resource Table",
[3] = "Exception Table",
[4] = "Certificate Table",
[5] = "Base Relocation Table",
[6] = "Debug",
[7] = "Architecture",
[8] = "Global Ptr",
[9] = "TLS Table",
[10] = "Load Config Table",
[11] = "Bound Import",
[12] = "IAT",
[13] = "Delay Import Descriptor",
[14] = "CLR Runtime Header",
[15] = "Reserved"
} &default=function(i: count):string { return fmt("unknown-%d", i); };
const section_characteristics: table[count] of string = {
[0x8] = "TYPE_NO_PAD",
[0x20] = "CNT_CODE",
[0x40] = "CNT_INITIALIZED_DATA",
[0x80] = "CNT_UNINITIALIZED_DATA",
[0x100] = "LNK_OTHER",
[0x200] = "LNK_INFO",
[0x800] = "LNK_REMOVE",
[0x1000] = "LNK_COMDAT",
[0x8000] = "GPREL",
[0x20000] = "MEM_16BIT",
[0x40000] = "MEM_LOCKED",
[0x80000] = "MEM_PRELOAD",
[0x100000] = "ALIGN_1BYTES",
[0x200000] = "ALIGN_2BYTES",
[0x300000] = "ALIGN_4BYTES",
[0x400000] = "ALIGN_8BYTES",
[0x500000] = "ALIGN_16BYTES",
[0x600000] = "ALIGN_32BYTES",
[0x700000] = "ALIGN_64BYTES",
[0x800000] = "ALIGN_128BYTES",
[0x900000] = "ALIGN_256BYTES",
[0xa00000] = "ALIGN_512BYTES",
[0xb00000] = "ALIGN_1024BYTES",
[0xc00000] = "ALIGN_2048BYTES",
[0xd00000] = "ALIGN_4096BYTES",
[0xe00000] = "ALIGN_8192BYTES",
[0x1000000] = "LNK_NRELOC_OVFL",
[0x2000000] = "MEM_DISCARDABLE",
[0x4000000] = "MEM_NOT_CACHED",
[0x8000000] = "MEM_NOT_PAGED",
[0x10000000] = "MEM_SHARED",
[0x20000000] = "MEM_EXECUTE",
[0x40000000] = "MEM_READ",
[0x80000000] = "MEM_WRITE"
} &default=function(i: count):string { return fmt("unknown-%d", i); };
const os_versions: table[count, count] of string = {
[10,0] = "Windows 10",
[6,4] = "Windows 10 Technical Preview",
[6,3] = "Windows 8.1 or Server 2012 R2",
[6,2] = "Windows 8 or Server 2012",
[6,1] = "Windows 7 or Server 2008 R2",
[6,0] = "Windows Vista or Server 2008",
[5,2] = "Windows XP x64 or Server 2003",
[5,1] = "Windows XP",
[5,0] = "Windows 2000",
[4,90] = "Windows Me",
[4,10] = "Windows 98",
[4,0] = "Windows 95 or NT 4.0",
[3,51] = "Windows NT 3.51",
[3,50] = "Windows NT 3.5",
[3,2] = "Windows 3.2",
[3,11] = "Windows for Workgroups 3.11",
[3,10] = "Windows 3.1 or NT 3.1",
[3,0] = "Windows 3.0",
[2,11] = "Windows 2.11",
[2,10] = "Windows 2.10",
[2,0] = "Windows 2.0",
[1,4] = "Windows 1.04",
[1,3] = "Windows 1.03",
[1,1] = "Windows 1.01",
[1,0] = "Windows 1.0",
} &default=function(i: count, j: count):string { return fmt("unknown-%d.%d", i, j); };
const section_descs: table[string] of string = {
[".bss"] = "Uninitialized data",
[".cormeta"] = "CLR metadata that indicates that the object file contains managed code",
[".data"] = "Initialized data",
[".debug$F"] = "Generated FPO debug information",
[".debug$P"] = "Precompiled debug types",
[".debug$S"] = "Debug symbols",
[".debug$T"] = "Debug types",
[".drective"] = "Linker options",
[".edata"] = "Export tables",
[".idata"] = "Import tables",
[".idlsym"] = "Includes registered SEH to support IDL attributes",
[".pdata"] = "Exception information",
[".rdata"] = "Read-only initialized data",
[".reloc"] = "Image relocations",
[".rsrc"] = "Resource directory",
[".sbss"] = "GP-relative uninitialized data",
[".sdata"] = "GP-relative initialized data",
[".srdata"] = "GP-relative read-only data",
[".sxdata"] = "Registered exception handler data",
[".text"] = "Executable code",
[".tls"] = "Thread-local storage",
[".tls$"] = "Thread-local storage",
[".vsdata"] = "GP-relative initialized data",
[".xdata"] = "Exception information",
} &default=function(i: string):string { return fmt("unknown-%s", i); };
}

View file

@ -0,0 +1,137 @@
module PE;
@load ./consts.bro
export {
redef enum Log::ID += { LOG };
type Info: record {
## Current timestamp.
ts: time &log;
## File id of this portable executable file.
id: string &log;
## The target machine that the file was compiled for.
machine: string &log &optional;
## The time that the file was created at.
compile_ts: time &log &optional;
## The required operating system.
os: string &log &optional;
## The subsystem that is required to run this file.
subsystem: string &log &optional;
## Is the file an executable, or just an object file?
is_exe: bool &log &default=T;
## Is the file a 64-bit executable?
is_64bit: bool &log &default=T;
## Does the file support Address Space Layout Randomization?
uses_aslr: bool &log &default=F;
## Does the file support Data Execution Prevention?
uses_dep: bool &log &default=F;
## Does the file enforce code integrity checks?
uses_code_integrity: bool &log &default=F;
## Does the file use structured exception handling?
uses_seh: bool &log &default=T;
## Does the file have an import table?
has_import_table: bool &log &optional;
## Does the file have an export table?
has_export_table: bool &log &optional;
## Does the file have an attribute certificate table?
has_cert_table: bool &log &optional;
## Does the file have a debug table?
has_debug_data: bool &log &optional;
## The names of the sections, in order.
section_names: vector of string &log &optional;
};
## Event for accessing logged records.
global log_pe: event(rec: Info);
## A hook that gets called when we first see a PE file.
global set_file: hook(f: fa_file);
}
redef record fa_file += {
pe: Info &optional;
};
const pe_mime_types = { "application/x-dosexec" };
event bro_init() &priority=5
{
Files::register_for_mime_types(Files::ANALYZER_PE, pe_mime_types);
Log::create_stream(LOG, [$columns=Info, $ev=log_pe, $path="pe"]);
}
hook set_file(f: fa_file) &priority=5
{
if ( ! f?$pe )
f$pe = [$ts=network_time(), $id=f$id];
}
event pe_dos_header(f: fa_file, h: PE::DOSHeader) &priority=5
{
hook set_file(f);
}
event pe_file_header(f: fa_file, h: PE::FileHeader) &priority=5
{
hook set_file(f);
f$pe$machine = machine_types[h$machine];
f$pe$compile_ts = h$ts;
f$pe$is_exe = ( h$optional_header_size > 0 );
for ( c in h$characteristics )
{
if ( file_characteristics[c] == "32BIT_MACHINE" )
f$pe$is_64bit = F;
}
}
event pe_optional_header(f: fa_file, h: PE::OptionalHeader) &priority=5
{
hook set_file(f);
# Only EXEs have optional headers
if ( ! f$pe$is_exe )
return;
f$pe$os = os_versions[h$os_version_major, h$os_version_minor];
f$pe$subsystem = windows_subsystems[h$subsystem];
for ( c in h$dll_characteristics )
{
if ( dll_characteristics[c] == "DYNAMIC_BASE" )
f$pe$uses_aslr = T;
if ( dll_characteristics[c] == "FORCE_INTEGRITY" )
f$pe$uses_code_integrity = T;
if ( dll_characteristics[c] == "NX_COMPAT" )
f$pe$uses_dep = T;
if ( dll_characteristics[c] == "NO_SEH" )
f$pe$uses_seh = F;
}
f$pe$has_export_table = (|h$table_sizes| > 0 && h$table_sizes[0] > 0);
f$pe$has_import_table = (|h$table_sizes| > 1 && h$table_sizes[1] > 0);
f$pe$has_cert_table = (|h$table_sizes| > 4 && h$table_sizes[4] > 0);
f$pe$has_debug_data = (|h$table_sizes| > 6 && h$table_sizes[6] > 0);
}
event pe_section_header(f: fa_file, h: PE::SectionHeader) &priority=5
{
hook set_file(f);
# Only EXEs have section headers
if ( ! f$pe$is_exe )
return;
if ( ! f$pe?$section_names )
f$pe$section_names = vector();
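	# Append by assigning to the index equal to the vector's current size.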
f$pe$section_names[|f$pe$section_names|] = h$name;
}
event file_state_remove(f: fa_file) &priority=-5
{
if ( f?$pe && f$pe?$machine )
Log::write(LOG, f$pe);
}

View file

@ -195,7 +195,7 @@ event Input::end_of_data(name: string, source: string)
event bro_init() &priority=5
{
Log::create_stream(Unified2::LOG, [$columns=Info, $ev=log_unified2]);
Log::create_stream(Unified2::LOG, [$columns=Info, $ev=log_unified2, $path="unified2"]);
if ( sid_msg == "" )
{

View file

@ -36,7 +36,7 @@ export {
event bro_init() &priority=5
{
Log::create_stream(X509::LOG, [$columns=Info, $ev=log_x509]);
Log::create_stream(X509::LOG, [$columns=Info, $ev=log_x509, $path="x509"]);
}
redef record Files::Info += {
@ -47,6 +47,9 @@ redef record Files::Info += {
event x509_certificate(f: fa_file, cert_ref: opaque of x509, cert: X509::Certificate) &priority=5
{
if ( ! f$info?$mime_type )
f$info$mime_type = "application/pkix-cert";
f$info$x509 = [$ts=f$info$ts, $id=f$id, $certificate=cert, $handle=cert_ref];
}

View file

@ -0,0 +1,2 @@
The Broker communication framework facilitates connecting to remote Bro
instances to share state and transfer events.

View file

@ -0,0 +1 @@
@load ./main

View file

@ -0,0 +1,103 @@
##! Various data structure definitions for use with Bro's communication system.
module BrokerComm;
export {
## A name used to identify this endpoint to peers.
## .. bro:see:: BrokerComm::connect BrokerComm::listen
const endpoint_name = "" &redef;
## Change communication behavior.
type EndpointFlags: record {
## Whether to restrict message topics that can be published to peers.
auto_publish: bool &default = T;
## Whether to restrict what message topics or data store identifiers
## the local endpoint advertises to peers (e.g. subscribing to
## events or making a master data store available).
auto_advertise: bool &default = T;
};
## Fine-grained tuning of communication behavior for a particular message.
type SendFlags: record {
## Send the message to the local endpoint.
self: bool &default = F;
## Send the message to peer endpoints that advertise interest in
## the topic associated with the message.
peers: bool &default = T;
## Send the message to peer endpoints even if they don't advertise
## interest in the topic associated with the message.
unsolicited: bool &default = F;
};
## Opaque communication data.
type Data: record {
d: opaque of BrokerComm::Data &optional;
};
## Opaque communication data.
type DataVector: vector of BrokerComm::Data;
## Opaque event communication data.
type EventArgs: record {
## The name of the event. Not set if invalid event or arguments.
name: string &optional;
## The arguments to the event.
args: DataVector;
};
## Opaque communication data used as a convenient way to wrap key-value
## pairs that comprise table entries.
type TableItem : record {
key: BrokerComm::Data;
val: BrokerComm::Data;
};
}
module BrokerStore;
export {
## Whether a data store query could be completed or not.
type QueryStatus: enum {
SUCCESS,
FAILURE,
};
## An expiry time for a key-value pair inserted into a data store.
type ExpiryTime: record {
## Absolute point in time at which to expire the entry.
absolute: time &optional;
## A point in time relative to the last modification time at which
## to expire the entry. New modifications will delay the expiration.
since_last_modification: interval &optional;
};
## The result of a data store query.
type QueryResult: record {
## Whether the query completed or not.
status: BrokerStore::QueryStatus;
## The result of the query. Certain queries may use a particular
## data type (e.g. querying store size always returns a count, but
## a lookup may return various data types).
result: BrokerComm::Data;
};
## Options to tune the SQLite storage backend.
type SQLiteOptions: record {
## File system path of the database.
path: string &default = "store.sqlite";
};
## Options to tune the RocksDB storage backend.
type RocksDBOptions: record {
## File system path of the database.
path: string &default = "store.rocksdb";
};
## Options to tune the particular storage backends.
type BackendOptions: record {
sqlite: SQLiteOptions &default = SQLiteOptions();
rocksdb: RocksDBOptions &default = RocksDBOptions();
};
}
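# A minimal usage sketch (illustrative; the store-creation calls that
# would consume these options are omitted):
#
#   local opts = BrokerStore::BackendOptions($sqlite = [$path = "/var/db/mystore.sqlite"]);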

View file

@ -126,6 +126,9 @@ export {
## This is usually supplied on the command line for each instance
## of the cluster that is started up.
const node = getenv("CLUSTER_NODE") &redef;
## Interval for retrying failed connections between cluster nodes.
const retry_interval = 1min &redef;
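	# Being &redef, this can be tuned from site policy, e.g.:
	#   redef Cluster::retry_interval = 30sec;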
}
function is_enabled(): bool
@ -159,5 +162,5 @@ event bro_init() &priority=5
terminate();
}
Log::create_stream(Cluster::LOG, [$columns=Info]);
Log::create_stream(Cluster::LOG, [$columns=Info, $path="cluster"]);
}

View file

@ -39,7 +39,7 @@ event bro_init() &priority=9
Communication::nodes["time-machine"] = [$host=nodes[i]$ip,
$zone_id=nodes[i]$zone_id,
$p=nodes[i]$p,
$connect=T, $retry=1min,
$connect=T, $retry=retry_interval,
$events=tm2manager_events];
}
@ -58,7 +58,7 @@ event bro_init() &priority=9
if ( n?$proxy )
Communication::nodes[i]
= [$host=n$ip, $zone_id=n$zone_id, $p=n$p,
$connect=T, $auth=F, $sync=T, $retry=1mins];
$connect=T, $auth=F, $sync=T, $retry=retry_interval];
else if ( me?$proxy && me$proxy == i )
Communication::nodes[me$proxy]
= [$host=nodes[i]$ip, $zone_id=nodes[i]$zone_id,
@ -70,7 +70,7 @@ event bro_init() &priority=9
Communication::nodes["manager"] = [$host=nodes[i]$ip,
$zone_id=nodes[i]$zone_id,
$p=nodes[i]$p,
$connect=T, $retry=1mins,
$connect=T, $retry=retry_interval,
$class=node,
$events=manager2proxy_events];
}
@ -80,7 +80,7 @@ event bro_init() &priority=9
Communication::nodes["manager"] = [$host=nodes[i]$ip,
$zone_id=nodes[i]$zone_id,
$p=nodes[i]$p,
$connect=T, $retry=1mins,
$connect=T, $retry=retry_interval,
$class=node,
$events=manager2worker_events];
@ -88,7 +88,7 @@ event bro_init() &priority=9
Communication::nodes["proxy"] = [$host=nodes[i]$ip,
$zone_id=nodes[i]$zone_id,
$p=nodes[i]$p,
$connect=T, $retry=1mins,
$connect=T, $retry=retry_interval,
$sync=T, $class=node,
$events=proxy2worker_events];
@ -98,7 +98,7 @@ event bro_init() &priority=9
$zone_id=nodes[i]$zone_id,
$p=nodes[i]$p,
$connect=T,
$retry=1min,
$retry=retry_interval,
$events=tm2worker_events];
}

View file

@ -164,7 +164,7 @@ const src_names = {
event bro_init() &priority=5
{
Log::create_stream(Communication::LOG, [$columns=Info]);
Log::create_stream(Communication::LOG, [$columns=Info, $path="communication"]);
}
function do_script_log_common(level: count, src: count, msg: string)

View file

@ -38,7 +38,7 @@ redef record connection += {
event bro_init() &priority=5
{
Log::create_stream(DPD::LOG, [$columns=Info]);
Log::create_stream(DPD::LOG, [$columns=Info, $path="dpd"]);
}
event protocol_confirmation(c: connection, atype: Analyzer::Tag, aid: count) &priority=10

View file

@ -1,3 +1,9 @@
@load-sigs ./archive
@load-sigs ./audio
@load-sigs ./font
@load-sigs ./general
@load-sigs ./image
@load-sigs ./msoffice
@load-sigs ./video
@load-sigs ./libmagic

View file

@ -0,0 +1,176 @@
signature file-tar {
file-magic /^[[:print:]\x00]{100}([[:digit:]\x20]{7}\x00){3}([[:digit:]\x20]{11}\x00){2}([[:digit:]\x00\x20]{7}[\x20\x00])[0-7\x00]/
file-mime "application/x-tar", 100
}
# This is low priority so that files using zip as a
# container will be identified correctly.
signature file-zip {
file-mime "application/zip", 10
file-magic /^PK\x03\x04.{2}/
}
# Multivolume Zip archive
signature file-multi-zip {
file-mime "application/zip", 10
file-magic /^PK\x07\x08PK\x03\x04/
}
# RAR
signature file-rar {
file-mime "application/x-rar", 70
file-magic /^Rar!/
}
# GZIP
signature file-gzip {
file-mime "application/x-gzip", 100
file-magic /\x1f\x8b/
}
# Microsoft Cabinet
signature file-ms-cab {
file-mime "application/vnd.ms-cab-compressed", 110
file-magic /^MSCF\x00\x00\x00\x00/
}
# Mac OS X DMG files
signature file-dmg {
file-magic /^(\x78\x01\x73\x0D\x62\x62\x60|\x78\xDA\x63\x60\x18\x05|\x78\x01\x63\x60\x18\x05|\x78\xDA\x73\x0D|\x78[\x01\xDA]\xED[\xD0-\xD9])/
file-mime "application/x-dmg", 100
}
# XAR (eXtensible ARchive) format.
# Mac OS X uses this for the .pkg format.
signature file-xar {
file-magic /^xar\!/
file-mime "application/x-xar", 100
}
# RPM
signature file-magic-auto352 {
file-mime "application/x-rpm", 70
file-magic /^(drpm|\xed\xab\xee\xdb)/
}
# StuffIt
signature file-stuffit {
file-mime "application/x-stuffit", 70
file-magic /^(SIT\x21|StuffIt)/
}
# Archived data
signature file-x-archive {
file-mime "application/x-archive", 70
file-magic /^!?<ar(ch)?>/
}
# ARC archive data
signature file-arc {
file-mime "application/x-arc", 70
file-magic /^[\x00-\x7f]{2}[\x02-\x0a\x14\x48]\x1a/
}
# EET archive
signature file-eet {
file-mime "application/x-eet", 70
file-magic /^\x1e\xe7\xff\x00/
}
# Zoo archive
signature file-zoo {
file-mime "application/x-zoo", 70
file-magic /^.{20}\xdc\xa7\xc4\xfd/
}
# LZ4 compressed data (legacy format)
signature file-lz4-legacy {
file-mime "application/x-lz4", 70
file-magic /(\x02\x21\x4c\x18)/
}
# LZ4 compressed data
signature file-lz4 {
file-mime "application/x-lz4", 70
file-magic /^\x04\x22\x4d\x18/
}
# LRZIP compressed data
signature file-lrzip {
file-mime "application/x-lrzip", 1
file-magic /^LRZI/
}
# LZIP compressed data
signature file-lzip {
file-mime "application/x-lzip", 70
file-magic /^LZIP/
}
# Self-extracting PKZIP archive
signature file-magic-auto434 {
file-mime "application/zip", 340
file-magic /^MZ.{28}(Copyright 1989\x2d1990 PKWARE Inc|PKLITE Copr)\x2e/
}
# LHA archive (LZH)
signature file-lzh {
file-mime "application/x-lzh", 80
file-magic /^.{2}-(lh[ abcdex0-9]|lz[s2-8]|pm[s012]|pc1)-/
}
# WARC Archive
signature file-warc {
file-mime "application/warc", 50
file-magic /^WARC\x2f/
}
# 7-zip archive data
signature file-7zip {
file-mime "application/x-7z-compressed", 50
file-magic /^7z\xbc\xaf\x27\x1c/
}
# XZ compressed data
signature file-xz {
file-mime "application/x-xz", 90
file-magic /^\xfd7zXZ\x00/
}
# LHa self-extracting archive
signature file-magic-auto436 {
file-mime "application/x-lha", 120
file-magic /^MZ.{34}LH[aA]\x27s SFX/
}
# ARJ archive data
signature file-arj {
file-mime "application/x-arj", 50
file-magic /^\x60\xea/
}
# Byte-swapped cpio archive
signature file-bs-cpio {
file-mime "application/x-cpio", 50
file-magic /(\x71\xc7|\xc7\x71)/
}
# CPIO archive
signature file-cpio {
file-mime "application/x-cpio", 50
file-magic /^(\xc7\x71|\x71\xc7)/
}
# Compress'd data
signature file-compress {
file-mime "application/x-compress", 50
file-magic /^\x1f\x9d/
}
# LZMA compressed data
signature file-lzma {
file-mime "application/x-lzma", 71
file-magic /^\x5d\x00\x00/
}

View file

@ -0,0 +1,13 @@
# MPEG v3 audio
signature file-mpeg-audio {
file-mime "audio/mpeg", 20
file-magic /^\xff[\xe2\xe3\xf2\xf3\xf6\xf7\xfa\xfb\xfc\xfd]/
}
# MPEG v4 audio
signature file-m4a {
file-mime "audio/m4a", 70
file-magic /^....ftyp(m4a)/
}

View file

@ -0,0 +1,41 @@
# Web Open Font Format
signature file-woff {
file-magic /^wOFF/
file-mime "application/font-woff", 70
}
# TrueType font
signature file-ttf {
file-mime "application/x-font-ttf", 80
file-magic /^\x00\x01\x00\x00\x00/
}
signature file-embedded-opentype {
file-mime "application/vnd.ms-fontobject", 50
file-magic /^.{34}LP/
}
# X11 SNF font
signature file-snf {
file-mime "application/x-font-sfn", 70
file-magic /^(\x04\x00\x00\x00|\x00\x00\x00\x04).{100}(\x04\x00\x00\x00|\x00\x00\x00\x04)/
}
# OpenType font
signature file-opentype {
file-mime "application/vnd.ms-opentype", 70
file-magic /^OTTO/
}
# FrameMaker Font file
signature file-maker-screen-font {
file-mime "application/x-mif", 190
file-magic /^\x3cMakerScreenFont/
}
# >0 string,=SplineFontDB: (len=13), ["Spline Font Database "], swap_endian=0
signature file-spline-font-db {
file-mime "application/vnd.font-fontforge-sfd", 160
file-magic /^SplineFontDB\x3a/
}

View file

@ -1,18 +1,93 @@
# General purpose file magic signatures.
# Plaintext
# (Including BOMs for UTF-8, 16, and 32)
signature file-plaintext {
file-magic /^([[:print:][:space:]]{10})/
file-mime "text/plain", -20
file-mime "text/plain", -20
file-magic /^(\xef\xbb\xbf|(\x00\x00)?\xfe\xff|\xff\xfe(\x00\x00)?)?[[:space:]\x20-\x7E]{10}/
}
signature file-tar {
file-magic /^[[:print:]\x00]{100}([[:digit:]\x20]{7}\x00){3}([[:digit:]\x20]{11}\x00){2}([[:digit:]\x00\x20]{7}[\x20\x00])[0-7\x00]/
file-mime "application/x-tar", 100
signature file-json {
file-mime "text/json", 1
file-magic /^(\xef\xbb\xbf)?[\x0d\x0a[:blank:]]*\{[\x0d\x0a[:blank:]]*(["][^"]{1,}["]|[a-zA-Z][a-zA-Z0-9\\_]*)[\x0d\x0a[:blank:]]*:[\x0d\x0a[:blank:]]*(["]|\[|\{|[0-9]|true|false)/
}
signature file-zip {
file-mime "application/zip", 10
file-magic /^PK\x03\x04.{2}/
signature file-json2 {
file-mime "text/json", 1
file-magic /^(\xef\xbb\xbf)?[\x0d\x0a[:blank:]]*\[[\x0d\x0a[:blank:]]*(((["][^"]{1,}["]|[0-9]{1,}(\.[0-9]{1,})?|true|false)[\x0d\x0a[:blank:]]*,)|\{|\[)[\x0d\x0a[:blank:]]*/
}
# Match empty JSON documents.
signature file-json3 {
file-mime "text/json", 0
file-magic /^(\xef\xbb\xbf)?[\x0d\x0a[:blank:]]*(\[\]|\{\})[\x0d\x0a[:blank:]]*$/
}
signature file-xml {
file-mime "application/xml", 10
file-magic /^(\xef\xbb\xbf)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*<\?xml /
}
signature file-xhtml {
file-mime "text/html", 100
file-magic /^(\xef\xbb\xbf)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*(<\?xml .*\?>)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*<(![dD][oO][cC][tT][yY][pP][eE] {1,}[hH][tT][mM][lL]|[hH][tT][mM][lL]|[mM][eE][tT][aA] {1,}[hH][tT][tT][pP]-[eE][qQ][uU][iI][vV])/
}
signature file-html {
file-mime "text/html", 49
file-magic /^(\xef\xbb\xbf)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*(<\?xml .*\?>)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*<![dD][oO][cC][tT][yY][pP][eE] {1,}[hH][tT][mM][lL]/
}
signature file-html2 {
file-mime "text/html", 20
file-magic /^(\xef\xbb\xbf)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*(<\?xml .*\?>)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*<([hH][eE][aA][dD]|[hH][tT][mM][lL]|[tT][iI][tT][lL][eE]|[bB][oO][dD][yY])/
}
signature file-rss {
file-mime "text/rss", 90
file-magic /^(\xef\xbb\xbf)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*(<\?xml .*\?>)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*<[rR][sS][sS]/
}
signature file-atom {
file-mime "text/atom", 100
file-magic /^(\xef\xbb\xbf)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*(<\?xml .*\?>)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*<([rR][sS][sS][^>]*xmlns:atom|[fF][eE][eE][dD][^>]*xmlns=["']?http:\/\/www.w3.org\/2005\/Atom["']?)/
}
signature file-soap {
file-mime "application/soap+xml", 49
file-magic /^(\xef\xbb\xbf)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*(<\?xml .*\?>)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*<[sS][oO][aA][pP](-[eE][nN][vV])?:[eE][nN][vV][eE][lL][oO][pP][eE]/
}
signature file-cross-domain-policy {
file-mime "text/x-cross-domain-policy", 49
file-magic /^([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*(<\?xml .*\?>)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*<![dD][oO][cC][tT][yY][pP][eE] {1,}[cC][rR][oO][sS][sS]-[dD][oO][mM][aA][iI][nN]-[pP][oO][lL][iI][cC][yY]/
}
signature file-cross-domain-policy2 {
file-mime "text/x-cross-domain-policy", 49
file-magic /^([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*(<\?xml .*\?>)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*<[cC][rR][oO][sS][sS]-[dD][oO][mM][aA][iI][nN]-[pP][oO][lL][iI][cC][yY]/
}
signature file-xmlrpc {
file-mime "application/xml-rpc", 49
file-magic /^(\xef\xbb\xbf)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*(<\?xml .*\?>)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*<[mM][eE][tT][hH][oO][dD][rR][eE][sS][pP][oO][nN][sS][eE]>/
}
signature file-coldfusion {
file-mime "magnus-internal/cold-fusion", 20
file-magic /^([\x0d\x0a[:blank:]]*(<!--.*-->)?)*<(CFPARAM|CFSET|CFIF)/
}
# Adobe Flash Media Manifest
signature file-f4m {
file-mime "application/f4m", 49
file-magic /^(\xef\xbb\xbf)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*(<\?xml .*\?>)?([\x0d\x0a[:blank:]]*(<!--.*-->)?[\x0d\x0a[:blank:]]*)*<[mM][aA][nN][iI][fF][eE][sS][tT][\x0d\x0a[:blank:]]{1,}xmlns=\"http:\/\/ns\.adobe\.com\/f4m\//
}
# Microsoft LNK files
signature file-lnk {
file-mime "application/x-ms-shortcut", 49
file-magic /^\x4C\x00\x00\x00\x01\x14\x02\x00\x00\x00\x00\x00\xC0\x00\x00\x00\x00\x10\x00\x00\x00\x46/
}
signature file-jar {
@ -21,8 +96,20 @@ signature file-jar {
}
signature file-java-applet {
file-magic /^\xca\xfe\xba\xbe...[\x2e-\x34]/
file-mime "application/x-java-applet", 71
file-magic /^\xca\xfe\xba\xbe...[\x2d-\x34]/
}
# OCSP requests over HTTP.
signature file-ocsp-request {
file-magic /^.{11,19}\x06\x05\x2b\x0e\x03\x02\x1a/
file-mime "application/ocsp-request", 71
}
# OCSP responses over HTTP.
signature file-ocsp-response {
file-magic /^.{11,19}\x06\x09\x2B\x06\x01\x05\x05\x07\x30\x01\x01/
file-mime "application/ocsp-response", 71
}
# Shockwave flash
@ -37,12 +124,6 @@ signature file-tnef {
file-mime "application/vnd.ms-tnef", 100
}
# Mac OS X DMG files
signature file-dmg {
file-magic /^(\x78\x01\x73\x0D\x62\x62\x60|\x78\xDA\x63\x60\x18\x05|\x78\x01\x63\x60\x18\x05|\x78\xDA\x73\x0D|\x78[\x01\xDA]\xED[\xD0-\xD9])/
file-mime "application/x-dmg", 100
}
# Mac OS X Mach-O executable
signature file-mach-o {
file-magic /^[\xce\xcf]\xfa\xed\xfe/
@ -55,13 +136,6 @@ signature file-mach-o-universal {
file-mime "application/x-mach-o-executable", 100
}
# XAR (eXtensible ARchive) format.
# Mac OS X uses this for the .pkg format.
signature file-xar {
file-magic /^xar\!/
file-mime "application/x-xar", 100
}
signature file-pkcs7 {
file-magic /^MIME-Version:.*protocol=\"application\/pkcs7-signature\"/
file-mime "application/pkcs7-signature", 100
@ -79,16 +153,6 @@ signature file-jnlp {
file-mime "application/x-java-jnlp-file", 100
}
signature file-ico {
file-magic /^\x00\x00\x01\x00/
file-mime "image/x-icon", 70
}
signature file-cur {
file-magic /^\x00\x00\x02\x00/
file-mime "image/x-cursor", 70
}
signature file-pcap {
file-magic /^(\xa1\xb2\xc3\xd4|\xd4\xc3\xb2\xa1)/
file-mime "application/vnd.tcpdump.pcap", 70
@ -119,7 +183,58 @@ signature file-python {
file-mime "text/x-python", 60
}
signature file-awk {
file-mime "text/x-awk", 60
file-magic /^\x23\x21[^\n]{1,15}bin\/(env[[:space:]]+)?(g|n)?awk/
}
signature file-tcl {
file-mime "text/x-tcl", 60
file-magic /^\x23\x21[^\n]{1,15}bin\/(env[[:space:]]+)?(wish|tcl)/
}
signature file-lua {
file-mime "text/x-lua", 49
file-magic /^\x23\x21[^\n]{1,15}bin\/(env[[:space:]]+)?lua/
}
signature file-javascript {
file-mime "application/javascript", 60
file-magic /^\x23\x21[^\n]{1,15}bin\/(env[[:space:]]+)?node(js)?/
}
signature file-javascript2 {
file-mime "application/javascript", 60
file-magic /^[\x0d\x0a[:blank:]]*<[sS][cC][rR][iI][pP][tT][[:blank:]]+([tT][yY][pP][eE]|[lL][aA][nN][gG][uU][aA][gG][eE])=['"]?([tT][eE][xX][tT]\/)?[jJ][aA][vV][aA][sS][cC][rR][iI][pP][tT]/
}
signature file-javascript3 {
file-mime "application/javascript", 60
# This seems to be a somewhat common idiom in JavaScript.
file-magic /^[\x0d\x0a[:blank:]]*for \(;;\);/
}
signature file-javascript4 {
file-mime "application/javascript", 60
file-magic /^[\x0d\x0a[:blank:]]*document\.write(ln)?[:blank:]?\(/
}
signature file-javascript5 {
file-mime "application/javascript", 60
file-magic /^\(function\(\)[[:blank:]\n]*\{/
}
signature file-javascript6 {
file-mime "application/javascript", 60
file-magic /^[\x0d\x0a[:blank:]]*<script>[\x0d\x0a[:blank:]]*(var|function) /
}
signature file-php {
file-mime "text/x-php", 60
file-magic /^\x23\x21[^\n]{1,15}bin\/(env[[:space:]]+)?php/
}
signature file-php2 {
file-magic /^.*<\?php/
file-mime "text/x-php", 40
}
@ -135,3 +250,23 @@ signature file-skp {
file-magic /^\xFF\xFE\xFF\x0E\x53\x00\x6B\x00\x65\x00\x74\x00\x63\x00\x68\x00\x55\x00\x70\x00\x20\x00\x4D\x00\x6F\x00\x64\x00\x65\x00\x6C\x00/
file-mime "application/skp", 100
}
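# ELF files. Byte 4 of the header is EI_CLASS (32/64 bit) and byte 5 is
# EI_DATA (\x01 little endian, \x02 big endian), so each signature below
# matches the 16-bit e_type field (1=relocatable object, 2=executable,
# 3=shared object, 4=core dump) in both byte orders.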
signature file-elf-object {
file-mime "application/x-object", 50
file-magic /\x7fELF[\x01\x02](\x01.{10}\x01\x00|\x02.{10}\x00\x01)/
}
signature file-elf {
file-mime "application/x-executable", 50
file-magic /\x7fELF[\x01\x02](\x01.{10}\x02\x00|\x02.{10}\x00\x02)/
}
signature file-elf-sharedlib {
file-mime "application/x-sharedlib", 50
file-magic /\x7fELF[\x01\x02](\x01.{10}\x03\x00|\x02.{10}\x00\x03)/
}
signature file-elf-coredump {
file-mime "application/x-coredump", 50
file-magic /\x7fELF[\x01\x02](\x01.{10}\x04\x00|\x02.{10}\x00\x04)/
}


@ -0,0 +1,166 @@
signature file-tiff {
file-mime "image/tiff", 70
file-magic /^(MM\x00[\x2a\x2b]|II[\x2a\x2b]\x00)/
}
signature file-gif {
file-mime "image/gif", 70
file-magic /^GIF8/
}
# JPEG image
signature file-jpeg {
file-mime "image/jpeg", 52
file-magic /^\xff\xd8/
}
signature file-bmp {
file-mime "image/x-ms-bmp", 50
file-magic /BM.{12}[\x0c\x28\x40\x6c\x7c\x80]\x00/
}
signature file-ico {
file-magic /^\x00\x00\x01\x00/
file-mime "image/x-icon", 70
}
signature file-cur {
file-magic /^\x00\x00\x02\x00/
file-mime "image/x-cursor", 70
}
signature file-magic-auto289 {
file-mime "image/vnd.adobe.photoshop", 70
file-magic /^8BPS/
}
signature file-png {
file-mime "image/png", 110
file-magic /^\x89PNG/
}
# JPEG 2000
signature file-jp2 {
file-mime "image/jp2", 60
file-magic /.{4}ftypjp2/
}
# JPEG 2000
signature file-jp22 {
file-mime "image/jp2", 70
file-magic /\x00\x00\x00\x0cjP \x0d\x0a\x87\x0a.{8}jp2 /
}
# JPEG 2000
signature file-jpx {
file-mime "image/jpx", 70
file-magic /\x00\x00\x00\x0cjP \x0d\x0a\x87\x0a.{8}jpx /
}
# JPEG 2000
signature file-jpm {
file-mime "image/jpm", 70
file-magic /\x00\x00\x00\x0cjP \x0d\x0a\x87\x0a.{8}jpm /
}
# Xcursor image
signature file-x-cursor {
file-mime "image/x-xcursor", 70
file-magic /^Xcur/
}
# NIFF image
signature file-niff {
file-mime "image/x-niff", 70
file-magic /^IIN1/
}
# OpenEXR image
signature file-openexr {
file-mime "image/x-exr", 70
file-magic /^\x76\x2f\x31\x01/
}
# DPX image
signature file-dpx {
file-mime "image/x-dpx", 70
file-magic /^SDPX/
}
# Cartesian Perceptual Compression image
signature file-cpi {
file-mime "image/x-cpi", 70
file-magic /(CPC\xb2)/
}
signature file-orf {
file-mime "image/x-olympus-orf", 70
file-magic /IIR[OS]|MMOR/
}
# Foveon X3F raw image
signature file-x3r {
file-mime "image/x-x3f", 70
file-magic /^FOVb/
}
# Paint.NET image
signature file-paint-net {
file-mime "image/x-paintnet", 70
file-magic /^PDN3/
}
# Corel Draw Picture
signature file-coreldraw {
file-mime "image/x-coreldraw", 70
file-magic /^RIFF....CDR[A6]/
}
# Netpbm PAM image
signature file-netbpm {
file-mime "image/x-portable-pixmap", 50
file-magic /^P7/
}
# JPEG 2000 image
signature file-jpeg-2000 {
file-mime "image/jp2", 50
file-magic /^....jP/
}
# DjVU Images
signature file-djvu {
file-mime "image/vnd.djvu", 70
file-magic /AT\x26TFORM.{4}(DJV[MUI]|THUM)/
}
# DWG AutoDesk AutoCAD
signature file-dwg {
file-mime "image/vnd.dwg", 90
file-magic /^(AC[12]\.|AC10)/
}
# GIMP XCF image
signature file-gimp-xcf {
file-mime "image/x-xcf", 110
file-magic /^gimp xcf/
}
# Polar Monitor Bitmap text
signature file-polar-monitor-bitmap {
file-mime "image/x-polar-monitor-bitmap", 160
file-magic /^\x5bBitmapInfo2\x5d/
}
# Award BIOS bitmap
signature file-award-bitmap {
file-mime "image/x-award-bmp", 20
file-magic /^AWBM/
}
# Award BIOS Logo, 136 x 84
signature file-award-bios-logo {
file-mime "image/x-award-bioslogo", 50
file-magic /^\x11[\x06\x09]/
}

File diff suppressed because it is too large


@ -26,3 +26,9 @@ signature file-pptx {
file-magic /^PK\x03\x04.{26}(\[Content_Types\]\.xml|_rels\x2f\.rels|ppt\x2f).*PK\x03\x04.{26}ppt\x2f/
file-mime "application/vnd.openxmlformats-officedocument.presentationml.presentation", 80
}
signature file-msaccess {
file-mime "application/x-msaccess", 180
file-magic /.{4}Standard (Jet|ACE) DB\x00/
}


@ -0,0 +1,105 @@
# Macromedia Flash Video
signature file-flv {
file-mime "video/x-flv", 60
file-magic /^FLV/
}
# FLI animation
signature file-fli {
file-mime "video/x-fli", 50
file-magic /^.{4}\x11\xaf/
}
# FLC animation
signature file-flc {
file-mime "video/x-flc", 50
file-magic /^.{4}\x12\xaf/
}
# Motion JPEG 2000
signature file-mj2 {
file-mime "video/mj2", 70
file-magic /\x00\x00\x00\x0cjP \x0d\x0a\x87\x0a.{8}mjp2/
}
# MNG video
signature file-mng {
file-mime "video/x-mng", 70
file-magic /^\x8aMNG/
}
# JNG video
signature file-jng {
file-mime "video/x-jng", 70
file-magic /^\x8bJNG/
}
# Generic MPEG container
signature file-mpeg {
file-mime "video/mpeg", 50
file-magic /(\x00\x00\x01[\xb0-\xbb])/
}
# MPV
signature file-mpv {
file-mime "video/mpv", 71
file-magic /(\x00\x00\x01\xb3)/
}
# H.264
signature file-h264 {
file-mime "video/h264", 41
file-magic /(\x00\x00\x00\x01)([\x07\x27\x47\x67\x87\xa7\xc7\xe7])/
}
# WebM video
signature file-webm {
file-mime "video/webm", 70
file-magic /(\x1a\x45\xdf\xa3)(.*)(B\x82)(.{1})(webm)/
}
# Matroska video
signature file-matroska {
file-mime "video/x-matroska", 110
file-magic /(\x1a\x45\xdf\xa3)(.*)(B\x82)(.{1})(matroska)/
}
# MP2P
signature file-mp2p {
file-mime "video/mp2p", 21
file-magic /\x00\x00\x01\xba([\x40-\x7f\xc0-\xff])/
}
# MPEG transport stream data. These files typically have the extension "ts".
# Note: The 0x47 repeats every 188 bytes. Using four as the number of
# occurrences for the test here is arbitrary.
signature file-mp2t {
file-mime "video/mp2t", 40
file-magic /^(\x47.{187}){4}/
}
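# Since the sync byte recurs every 188 bytes, stricter matching just means
# requiring more repetitions; a hypothetical tightened variant (name and
# priority are illustrative only):
signature file-mp2t-strict {
    file-mime "video/mp2t", 41
    file-magic /^(\x47.{187}){8}/
}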
# Silicon Graphics video
signature file-sgi-movie {
file-mime "video/x-sgi-movie", 70
file-magic /^MOVI/
}
# Apple QuickTime movie
signature file-quicktime {
file-mime "video/quicktime", 70
file-magic /^....(mdat|moov)/
}
# MPEG v4 video
signature file-mp4 {
file-mime "video/mp4", 70
file-magic /^....ftyp(isom|mp4[12])/
}
# 3GPP Video
signature file-3gpp {
file-mime "video/3gpp", 60
file-magic /^....ftyp(3g[egps2]|avc1|mmp4)/
}


@ -129,12 +129,11 @@ export {
## files based on the detected mime type of the file.
const analyze_by_mime_type_automatically = T &redef;
## The default setting for if the file reassembler is enabled for
## each file.
## The default setting for file reassembly.
const enable_reassembler = T &redef;
## The default per-file reassembly buffer size.
const reassembly_buffer_size = 1048576 &redef;
const reassembly_buffer_size = 524288 &redef;
## Allows the file reassembler to be used if it's necessary because the
## file is transferred out of order.
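All three options above are &redef, so deployments can override them; a
minimal local.bro sketch (the values chosen here are purely illustrative):

    redef Files::analyze_by_mime_type_automatically = T;
    redef Files::enable_reassembler = F;
    redef Files::reassembly_buffer_size = 1048576;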
@ -313,7 +312,7 @@ global analyzer_add_callbacks: table[Files::Tag] of function(f: fa_file, args: A
event bro_init() &priority=5
{
Log::create_stream(Files::LOG, [$columns=Info, $ev=log_files]);
Log::create_stream(Files::LOG, [$columns=Info, $ev=log_files, $path="files"]);
}
function set_info(f: fa_file)
@ -484,16 +483,19 @@ event file_over_new_connection(f: fa_file, c: connection, is_orig: bool) &priori
add f$info$rx_hosts[f$is_orig ? cid$resp_h : cid$orig_h];
}
event file_mime_type(f: fa_file, mime_type: string) &priority=10
event file_sniff(f: fa_file, meta: fa_metadata) &priority=10
{
set_info(f);
f$info$mime_type = mime_type;
if ( ! meta?$mime_type )
return;
f$info$mime_type = meta$mime_type;
if ( analyze_by_mime_type_automatically &&
mime_type in mime_type_to_analyzers )
meta$mime_type in mime_type_to_analyzers )
{
local analyzers = mime_type_to_analyzers[mime_type];
local analyzers = mime_type_to_analyzers[meta$mime_type];
for ( a in analyzers )
{
add f$info$analyzers[Files::analyzer_name(a)];
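For script writers, a minimal sketch of a handler for the new file_sniff
event (the mime string and analyzer choice are only examples):

    event file_sniff(f: fa_file, meta: fa_metadata)
        {
        if ( ! meta?$mime_type )
            return;
        if ( meta$mime_type == "application/x-dosexec" )
            Files::add_analyzer(f, Files::ANALYZER_MD5);
        }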


@ -1,18 +1,25 @@
##! The input framework provides a way to read previously stored data either
##! as an event stream or into a bro table.
##! as an event stream or into a Bro table.
module Input;
export {
type Event: enum {
## New data has been imported.
EVENT_NEW = 0,
## Existing data has been changed.
EVENT_CHANGED = 1,
## Previously existing data has been removed.
EVENT_REMOVED = 2,
};
## Type that defines the input stream read mode.
type Mode: enum {
## Do not automatically reread the file after it has been read.
MANUAL = 0,
## Reread the entire file each time a change is found.
REREAD = 1,
## Read data from end of file each time new data is appended.
STREAM = 2
};
@ -24,20 +31,20 @@ export {
## Separator between fields.
## Please note that the separator has to be exactly one character long.
## Can be overwritten by individual writers.
## Individual readers can use a different value.
const separator = "\t" &redef;
## Separator between set elements.
## Please note that the separator has to be exactly one character long.
## Can be overwritten by individual writers.
## Individual readers can use a different value.
const set_separator = "," &redef;
## String to use for empty fields.
## Can be overwritten by individual writers.
## Individual readers can use a different value.
const empty_field = "(empty)" &redef;
## String to use for an unset &optional field.
## Can be overwritten by individual writers.
## Individual readers can use a different value.
const unset_field = "-" &redef;
## Flag that controls if the input framework accepts records
@ -47,11 +54,11 @@ export {
## abort. Defaults to false (abort).
const accept_unsupported_types = F &redef;
## TableFilter description type used for the `table` method.
## A table input stream type used to send data to a Bro table.
type TableDescription: record {
# Common definitions for tables and events
## String that allows the reader to find the source.
## String that allows the reader to find the source of the data.
## For `READER_ASCII`, this is the filename.
source: string;
@ -61,7 +68,8 @@ export {
## Read mode to use for this stream.
mode: Mode &default=default_mode;
## Descriptive name. Used to remove a stream at a later time.
## Name of the input stream. This is used by some functions to
## manipulate the stream.
name: string;
# Special definitions for tables
@ -73,31 +81,35 @@ export {
idx: any;
## Record that defines the values used as the elements of the table.
## If this is undefined, then *destination* has to be a set.
## If this is undefined, then *destination* must be a set.
val: any &optional;
## Defines if the value of the table is a record (default), or a single value.
## When this is set to false, then *val* can only contain one element.
## Defines if the value of the table is a record (default), or a single
## value. When this is set to false, then *val* can only contain one
## element.
want_record: bool &default=T;
## The event that is raised each time a value is added to, changed in or removed
## from the table. The event will receive an Input::Event enum as the first
## argument, the *idx* record as the second argument and the value (record) as the
## third argument.
ev: any &optional; # event containing idx, val as values.
## The event that is raised each time a value is added to, changed in,
## or removed from the table. The event will receive an
## Input::TableDescription as the first argument, an Input::Event
## enum as the second argument, the *idx* record as the third argument
## and the value (record) as the fourth argument.
ev: any &optional;
## Predicate function that can decide if an insertion, update or removal should
## really be executed. Parameters are the same as for the event. If true is
## returned, the update is performed. If false is returned, it is skipped.
## Predicate function that can decide if an insertion, update or removal
## should really be executed. Parameters have same meaning as for the
## event.
## If true is returned, the update is performed. If false is returned,
## it is skipped.
pred: function(typ: Input::Event, left: any, right: any): bool &optional;
## A key/value table that will be passed on the reader.
## Interpretation of the values is left to the writer, but
## A key/value table that will be passed to the reader.
## Interpretation of the values is left to the reader, but
## usually they will be used for configuration purposes.
config: table[string] of string &default=table();
config: table[string] of string &default=table();
};
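Pulling the fields above together, a minimal table-stream sketch (the file
name and record layouts are hypothetical):

    type Idx: record {
        ip: addr;
    };

    type Val: record {
        reason: string;
    };

    global blacklist: table[addr] of Val = table();

    event bro_init()
        {
        Input::add_table([$source="blacklist.file", $name="blacklist",
                          $idx=Idx, $val=Val, $destination=blacklist,
                          $mode=Input::REREAD]);
        }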
## EventFilter description type used for the `event` method.
## An event input stream type used to send input data to a Bro event.
type EventDescription: record {
# Common definitions for tables and events
@ -116,19 +128,26 @@ export {
# Special definitions for events
## Record describing the fields to be retrieved from the source input.
## Record type describing the fields to be retrieved from the input
## source.
fields: any;
## If this is false, the event receives each value in fields as a separate argument.
## If this is set to true (default), the event receives all fields in a single record value.
## If this is false, the event receives each value in *fields* as a
## separate argument.
## If this is set to true (default), the event receives all fields in
## a single record value.
want_record: bool &default=T;
## The event that is raised each time a new line is received from the reader.
## The event will receive an Input::Event enum as the first element, and the fields as the following arguments.
## The event that is raised each time a new line is received from the
## reader. The event will receive an Input::EventDescription record
## as the first argument, an Input::Event enum as the second
## argument, and the fields (as specified in *fields*) as the following
## arguments (this will either be a single record value containing
## all fields, or each field value as a separate argument).
ev: any;
## A key/value table that will be passed on the reader.
## Interpretation of the values is left to the writer, but
## A key/value table that will be passed to the reader.
## Interpretation of the values is left to the reader, but
## usually they will be used for configuration purposes.
config: table[string] of string &default=table();
};
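And the corresponding event-stream sketch, using the new handler signature
documented above (the source file and event name are hypothetical):

    type InputLine: record {
        line: string;
    };

    event got_line(desc: Input::EventDescription, tpe: Input::Event, line: string)
        {
        print line;
        }

    event bro_init()
        {
        Input::add_event([$source="input.txt", $name="input",
                          $fields=InputLine, $want_record=F, $ev=got_line]);
        }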
@ -155,28 +174,29 @@ export {
## field will be the same value as the *source* field.
name: string;
## A key/value table that will be passed on the reader.
## Interpretation of the values is left to the writer, but
## A key/value table that will be passed to the reader.
## Interpretation of the values is left to the reader, but
## usually they will be used for configuration purposes.
config: table[string] of string &default=table();
};
## Create a new table input from a given source.
## Create a new table input stream from a given source.
##
## description: `TableDescription` record describing the source.
##
## Returns: true on success.
global add_table: function(description: Input::TableDescription) : bool;
## Create a new event input from a given source.
## Create a new event input stream from a given source.
##
## description: `EventDescription` record describing the source.
##
## Returns: true on success.
global add_event: function(description: Input::EventDescription) : bool;
## Create a new file analysis input from a given source. Data read from
## the source is automatically forwarded to the file analysis framework.
## Create a new file analysis input stream from a given source. Data read
## from the source is automatically forwarded to the file analysis
## framework.
##
## description: A record describing the source.
##
@ -199,7 +219,11 @@ export {
## Event that is called when the end of a data source has been reached,
## including after an update.
global end_of_data: event(name: string, source:string);
##
## name: Name of the input stream.
##
## source: String that identifies the data source (such as the filename).
global end_of_data: event(name: string, source: string);
}
@load base/bif/input.bif
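For completeness, a tiny handler for the end_of_data event documented above
(a sketch, nothing more):

    event Input::end_of_data(name: string, source: string)
        {
        print fmt("input stream %s finished reading %s", name, source);
        }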

Some files were not shown because too many files have changed in this diff