Merge remote-tracking branch 'origin/master' into topic/vladg/sip

Author: Vlad Grigorescu
Date:   2014-04-22 18:55:26 -04:00
Commit: 25d7b71c50

1460 changed files with 70473 additions and 191398 deletions

.gitmodules (18 lines changed)

@@ -1,21 +1,21 @@
 [submodule "aux/bro-aux"]
 	path = aux/bro-aux
-	url = git://git.bro-ids.org/bro-aux
+	url = git://git.bro.org/bro-aux
 [submodule "aux/binpac"]
 	path = aux/binpac
-	url = git://git.bro-ids.org/binpac
+	url = git://git.bro.org/binpac
 [submodule "aux/broccoli"]
 	path = aux/broccoli
-	url = git://git.bro-ids.org/broccoli
+	url = git://git.bro.org/broccoli
 [submodule "aux/broctl"]
 	path = aux/broctl
-	url = git://git.bro-ids.org/broctl
+	url = git://git.bro.org/broctl
 [submodule "aux/btest"]
 	path = aux/btest
-	url = git://git.bro-ids.org/btest
+	url = git://git.bro.org/btest
 [submodule "cmake"]
 	path = cmake
-	url = git://git.bro-ids.org/cmake
+	url = git://git.bro.org/cmake
-[submodule "magic"]
-	path = magic
-	url = git://git.bro.org/bromagic
+[submodule "src/3rdparty"]
+	path = src/3rdparty
+	url = git://git.bro.org/bro-3rdparty

CHANGES (11485 lines changed)

File diff suppressed because it is too large.

CMakeLists.txt
@@ -10,24 +10,18 @@ if (NOT BRO_SCRIPT_INSTALL_PATH)
     # set the default Bro script installation path (user did not specify one)
     set(BRO_SCRIPT_INSTALL_PATH ${BRO_ROOT_DIR}/share/bro)
 endif ()
+set(BRO_SCRIPT_SOURCE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/scripts)
 # sanitize the Bro script install directory into an absolute path
 # (CMake is confused by ~ as a representation of home directory)
 get_filename_component(BRO_SCRIPT_INSTALL_PATH ${BRO_SCRIPT_INSTALL_PATH}
                        ABSOLUTE)
-set(BRO_MAGIC_INSTALL_PATH ${BRO_ROOT_DIR}/share/bro/magic)
-set(BRO_MAGIC_SOURCE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/magic/database)
 configure_file(bro-path-dev.in ${CMAKE_CURRENT_BINARY_DIR}/bro-path-dev)
 file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/bro-path-dev.sh
     "export BROPATH=`${CMAKE_CURRENT_BINARY_DIR}/bro-path-dev`\n"
-    "export BROMAGIC=\"${BRO_MAGIC_SOURCE_PATH}\"\n"
     "export PATH=\"${CMAKE_CURRENT_BINARY_DIR}/src\":$PATH\n")
 file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/bro-path-dev.csh
     "setenv BROPATH `${CMAKE_CURRENT_BINARY_DIR}/bro-path-dev`\n"
-    "setenv BROMAGIC \"${BRO_MAGIC_SOURCE_PATH}\"\n"
     "setenv PATH \"${CMAKE_CURRENT_BINARY_DIR}/src\":$PATH\n")
 file(STRINGS "${CMAKE_CURRENT_SOURCE_DIR}/VERSION" VERSION LIMIT_COUNT 1)
@@ -58,7 +52,6 @@ FindRequiredPackage(BISON)
 FindRequiredPackage(PCAP)
 FindRequiredPackage(OpenSSL)
 FindRequiredPackage(BIND)
-FindRequiredPackage(LibMagic)
 FindRequiredPackage(ZLIB)
 if (NOT BinPAC_ROOT_DIR AND
@@ -67,6 +60,10 @@ if (NOT BinPAC_ROOT_DIR AND
 endif ()
 FindRequiredPackage(BinPAC)
+if (ENABLE_JEMALLOC)
+    find_package(JeMalloc)
+endif ()
 if (MISSING_PREREQS)
     foreach (prereq ${MISSING_PREREQ_DESCS})
         message(SEND_ERROR ${prereq})
@@ -74,19 +71,13 @@ if (MISSING_PREREQS)
     message(FATAL_ERROR "Configuration aborted due to missing prerequisites")
 endif ()
-set(libmagic_req 5.04)
-if ( LibMagic_VERSION VERSION_LESS ${libmagic_req} )
-    message(FATAL_ERROR "libmagic of at least version ${libmagic_req} required "
-                        "(found ${LibMagic_VERSION})")
-endif ()
 include_directories(BEFORE
     ${PCAP_INCLUDE_DIR}
     ${OpenSSL_INCLUDE_DIR}
     ${BIND_INCLUDE_DIR}
     ${BinPAC_INCLUDE_DIR}
-    ${LibMagic_INCLUDE_DIR}
     ${ZLIB_INCLUDE_DIR}
+    ${JEMALLOC_INCLUDE_DIR}
 )
 # Optional Dependencies
@@ -153,9 +144,10 @@ if (NOT DISABLE_ELASTICSEARCH AND LIBCURL_FOUND)
     list(APPEND OPTLIBS ${LibCURL_LIBRARIES})
 endif()
-if (ENABLE_PERFTOOLS_DEBUG)
+if (ENABLE_PERFTOOLS_DEBUG OR ENABLE_PERFTOOLS)
     # Just a no op to prevent CMake from complaining about manually-specified
-    # ENABLE_PERFTOOLS_DEBUG not being used if google perftools weren't found
+    # ENABLE_PERFTOOLS_DEBUG or ENABLE_PERFTOOLS not being used if google
+    # perftools weren't found
 endif ()
 set(brodeps
@@ -163,8 +155,8 @@ set(brodeps
     ${PCAP_LIBRARY}
     ${OpenSSL_LIBRARIES}
     ${BIND_LIBRARY}
-    ${LibMagic_LIBRARY}
     ${ZLIB_LIBRARY}
+    ${JEMALLOC_LIBRARIES}
     ${OPTLIBS}
 )
@@ -201,10 +193,6 @@ CheckOptionalBuildSources(aux/broctl Broctl INSTALL_BROCTL)
 CheckOptionalBuildSources(aux/bro-aux Bro-Aux INSTALL_AUX_TOOLS)
 CheckOptionalBuildSources(aux/broccoli Broccoli INSTALL_BROCCOLI)
-install(DIRECTORY ./magic/database/
-        DESTINATION ${BRO_MAGIC_INSTALL_PATH}
-)
 ########################################################################
 ## Packaging Setup
@@ -249,6 +237,7 @@ message(
     "\ngperftools found: ${HAVE_PERFTOOLS}"
     "\n tcmalloc: ${USE_PERFTOOLS_TCMALLOC}"
     "\n debugging: ${USE_PERFTOOLS_DEBUG}"
+    "\njemalloc: ${ENABLE_JEMALLOC}"
     "\ncURL: ${USE_CURL}"
     "\n"
     "\nDataSeries: ${USE_DATASERIES}"

COPYING

@@ -1,4 +1,4 @@
-Copyright (c) 1995-2012, The Regents of the University of California
+Copyright (c) 1995-2013, The Regents of the University of California
 through the Lawrence Berkeley National Laboratory and the
 International Computer Science Institute. All rights reserved.

INSTALL (313 lines changed)

@@ -1,314 +1,3 @@
.. _CMake: http://www.cmake.org
.. _SWIG: http://www.swig.org
.. _Xcode: https://developer.apple.com/xcode/
.. _MacPorts: http://www.macports.org
.. _Fink: http://www.finkproject.org
.. _Homebrew: http://mxcl.github.com/homebrew
.. _bro downloads page: http://bro.org/download/index.html
See doc/install/install.rst for installation instructions.

==============
Installing Bro
==============
Bro can be downloaded in either pre-built binary package or
source code forms.
Prerequisites
=============
Bro requires the following libraries and tools to be installed
before you begin:
* Libpcap http://www.tcpdump.org
* OpenSSL libraries http://www.openssl.org
* BIND8 library
* Libmagic
* Libz
* Bash (for BroControl)
To build Bro from source, the following additional dependencies are required:
* CMake 2.6.3 or greater http://www.cmake.org
* SWIG http://www.swig.org
* Bison (GNU Parser Generator)
* Flex (Fast Lexical Analyzer)
* Libpcap headers http://www.tcpdump.org
* OpenSSL headers http://www.openssl.org
* libmagic headers
* zlib headers
* Perl
Bro can make use of some optional libraries and tools if they are found at
build time:
* LibGeoIP (for geo-locating IP addresses)
* gperftools (tcmalloc is used to improve memory and CPU usage)
* sendmail (for BroControl)
* ipsumdump (for trace-summary) http://www.cs.ucla.edu/~kohler/ipsumdump
* Ruby executable, library, and headers (for Broccoli Ruby bindings)
Installing From Pre-Built Binary Release Packages
=================================================
See the `bro downloads page`_ for currently supported/targeted platforms.
* RPM
.. console::
sudo yum localinstall Bro-*.rpm
* DEB
.. console::
sudo gdebi Bro-*.deb
* MacOS Disk Image with Installer
Just open the ``Bro-*.dmg`` and then run the ``.pkg`` installer.
Everything installed by the package will go into ``/opt/bro``.
The primary install prefix for binary packages is ``/opt/bro``.
Non-MacOS packages that include BroControl also put variable/runtime
data (e.g. Bro logs) in ``/var/opt/bro``.
Installing From Source
======================
Required Dependencies
~~~~~~~~~~~~~~~~~~~~~
The following dependencies are required to build Bro:
* RPM/RedHat-based Linux:
.. console::
sudo yum install cmake make gcc gcc-c++ flex bison libpcap-devel openssl-devel python-devel swig zlib-devel file-devel
* DEB/Debian-based Linux:
.. console::
sudo apt-get install cmake make gcc g++ flex bison libpcap-dev libssl-dev python-dev swig zlib1g-dev libmagic-dev
* FreeBSD
Most required dependencies should come with a minimal FreeBSD install
except for the following.
.. console::
sudo pkg_add -r bash cmake swig bison python
Note that ``bash`` needs to be in ``PATH``, which by default it is
not. The FreeBSD package installs the binary into
``/usr/local/bin``.
* Mac OS X
Compiling source code on Macs requires first downloading Xcode_,
then going through its "Preferences..." -> "Downloads" menus to
install the "Command Line Tools" component.
Lion (10.7) and Mountain Lion (10.8) come with all required
dependencies except for CMake_, SWIG_, and ``libmagic``.
Distributions of these dependencies can likely be obtained from your
preferred Mac OS X package management system (e.g. MacPorts_, Fink_,
or Homebrew_).
Specifically for MacPorts, the ``swig``, ``swig-ruby``, ``swig-python``
and ``file`` packages provide the required dependencies.
Optional Dependencies
~~~~~~~~~~~~~~~~~~~~~
Bro can use libGeoIP for geo-locating IP addresses, and sendmail for
sending emails.
* RedHat Enterprise Linux:
.. console::
sudo yum install geoip-devel sendmail
* CentOS Linux:
.. console::
sudo yum install GeoIP-devel sendmail
* DEB/Debian-based Linux:
.. console::
sudo apt-get install libgeoip-dev sendmail
* Ports-based FreeBSD
.. console::
sudo pkg_add -r GeoIP
sendmail is typically already available.
* Mac OS X
Vanilla OS X installations don't ship with libGeoIP, but
if installed from your preferred package management system (e.g. MacPorts,
Fink, or Homebrew), they should be automatically detected and Bro will
compile against them.
Additional steps may be needed to :doc:`get the right GeoIP database <geoip>`.
Compiling Bro Source Code
~~~~~~~~~~~~~~~~~~~~~~~~~
Bro releases are bundled into source packages for convenience and
available from the `bro downloads page`_.
Alternatively, the latest Bro development version can be obtained through git
repositories hosted at `git.bro.org <http://git.bro.org>`_. See
our `git development documentation
<http://bro.org/development/process.html>`_ for comprehensive
information on Bro's use of git revision control, but the short story
for downloading the full source code experience for Bro via git is:
.. console::
git clone --recursive git://git.bro.org/bro
.. note:: If you choose to clone the ``bro`` repository non-recursively for
a "minimal Bro experience", be aware that compiling it depends on
BinPAC, which has its own ``binpac`` repository. Either install it
first or initialize/update the cloned ``bro`` repository's
``aux/binpac`` submodule.
The typical way to build and install from source is (for more options,
run ``./configure --help``):
.. console::
./configure
make
make install
The default installation path is ``/usr/local/bro``, which would typically
require root privileges when doing the ``make install``. A different
installation path can be chosen by specifying the ``--prefix`` option.
Note that ``/usr`` and ``/opt/bro`` are the
standard prefixes for binary Bro packages to be installed, so those are
typically not good choices unless you are creating such a package.
Depending on the Bro package you downloaded, there may be auxiliary
tools and libraries available in the ``aux/`` directory. Some of them
will be automatically built and installed along with Bro. There are
``--disable-*`` options that can be given to the configure script to
turn off unwanted auxiliary projects that would otherwise be installed
automatically. Finally, use ``make install-aux`` to install some of
the other programs that are in the ``aux/bro-aux`` directory.
OpenBSD users, please see our FAQ at
http://www.bro.org/documentation/faq.html if you are having
problems installing Bro.
Upgrading From a Previous Version of Bro
========================================
If you're doing an upgrade install (rather than a fresh install),
there's two suggested approaches: either install Bro using the same
installation prefix directory as before, or pick a new prefix and copy
local customizations over.
Re-Use Previous Install Prefix
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
If you choose to configure and install Bro with the same prefix
directory as before, local customization and configuration to files in
``$prefix/share/bro/site`` and ``$prefix/etc`` won't be overwritten
(``$prefix`` indicating the root of where Bro was installed). Also, logs
generated at run-time won't be touched by the upgrade. (But making
a backup of local changes before upgrading is still recommended.)
After upgrading, remember to check ``$prefix/share/bro/site`` and
``$prefix/etc`` for ``.example`` files, which indicate the
distribution's version of the file differs from the local one, which may
include local changes. Review the differences, and make adjustments
as necessary (for differences that aren't the result of a local change,
use the new version's).
Pick a New Install prefix
~~~~~~~~~~~~~~~~~~~~~~~~~
If you want to install the newer version in a different prefix
directory than before, you can just copy local customization and
configuration files from ``$prefix/share/bro/site`` and ``$prefix/etc``
to the new location (``$prefix`` indicating the root of where Bro was
originally installed). Make sure to review the files for difference
before copying and make adjustments as necessary (for differences that
aren't the result of a local change, use the new version's). Of
particular note, the copied version of ``$prefix/etc/broctl.cfg`` is
likely to need changes to the ``SpoolDir`` and ``LogDir`` settings.
Configure the Run-Time Environment
==================================
Just remember that you may need to adjust your ``PATH`` environment variable
according to the platform/shell/package you're using. For example:
Bourne-Shell Syntax:
.. console::
export PATH=/usr/local/bro/bin:$PATH
C-Shell Syntax:
.. console::
setenv PATH /usr/local/bro/bin:$PATH
Or substitute ``/opt/bro/bin`` instead if you installed from a binary package.
Running Bro
===========
Bro is a complex program and it takes a bit of time to get familiar
with it. A good place for newcomers to start is the Quick Start Guide
at http://www.bro.org/documentation/quickstart.html.
For developers that wish to run Bro directly from the ``build/``
directory (i.e., without performing ``make install``), they will have
to first adjust ``BROPATH`` to look for scripts inside the build
directory. Sourcing either ``build/bro-path-dev.sh`` or
``build/bro-path-dev.csh`` as appropriate for the current shell
accomplishes this and also augments your ``PATH`` so you can use the
Bro binary directly::
./configure
make
source build/bro-path-dev.sh
bro <options>

Makefile

@@ -6,7 +6,7 @@
 #
 BUILD=build
-REPO=`basename \`git config --get remote.origin.url\``
+REPO=`basename \`git config --get remote.origin.url | sed 's/^[^:]*://g'\``
 VERSION_FULL=$(REPO)-`cat VERSION`
 VERSION_MIN=$(REPO)-`cat VERSION`-minimal
 HAVE_MODULES=git submodule | grep -v cmake >/dev/null
@@ -29,28 +29,17 @@ doc: configured
 docclean: configured
 	$(MAKE) -C $(BUILD) $@
-restdoc: configured
-	$(MAKE) -C $(BUILD) $@
-restclean: configured
-	$(MAKE) -C $(BUILD) $@
-broxygen: configured
-	$(MAKE) -C $(BUILD) $@
-broxygenclean: configured
-	$(MAKE) -C $(BUILD) $@
 dist:
 	@rm -rf $(VERSION_FULL) $(VERSION_FULL).tgz
 	@rm -rf $(VERSION_MIN) $(VERSION_MIN).tgz
-	@mkdir $(VERSION_FULL)
-	@tar --exclude=$(VERSION_FULL)* --exclude=$(VERSION_MIN)* --exclude=.git -cf - . | ( cd $(VERSION_FULL) && tar -xpf - )
-	@( cd $(VERSION_FULL) && cp -R ../.git . && git reset -q --hard HEAD && git clean -xdfq && rm -rf .git )
+	@git clone --recursive . $(VERSION_FULL) >/dev/null 2>&1
+	@find $(VERSION_FULL) -name .git\* | xargs rm -rf
 	@tar -czf $(VERSION_FULL).tgz $(VERSION_FULL) && echo Package: $(VERSION_FULL).tgz && rm -rf $(VERSION_FULL)
-	@$(HAVE_MODULES) && mkdir $(VERSION_MIN) || exit 0
-	@$(HAVE_MODULES) && tar --exclude=$(VERSION_FULL)* --exclude=$(VERSION_MIN)* --exclude=.git `git submodule | awk '{print "--exclude="$$2}' | grep -v cmake | tr '\n' ' '` -cf - . | ( cd $(VERSION_MIN) && tar -xpf - ) || exit 0
-	@$(HAVE_MODULES) && ( cd $(VERSION_MIN) && cp -R ../.git . && git reset -q --hard HEAD && git clean -xdfq && rm -rf .git ) || exit 0
-	@$(HAVE_MODULES) && (cd $(VERSION_MIN) && git submodule update --init magic >/dev/null 2>&1) || exit 0
-	@$(HAVE_MODULES) && find $(VERSION_MIN) -name .git\* | xargs rm -rf || exit 0
+	@$(HAVE_MODULES) && git clone . $(VERSION_MIN) >/dev/null 2>&1 || exit 0
+	@$(HAVE_MODULES) && (cd $(VERSION_MIN) && git submodule update --init cmake >/dev/null 2>&1) || exit 0
+	@$(HAVE_MODULES) && (cd $(VERSION_MIN) && git submodule update --init src/3rdparty >/dev/null 2>&1) || exit 0
 	@$(HAVE_MODULES) && tar -czf $(VERSION_MIN).tgz $(VERSION_MIN) && echo Package: $(VERSION_MIN).tgz && rm -rf $(VERSION_MIN) || exit 0
 bindist:
@@ -65,6 +54,7 @@ test:
 test-all: test
 	test -d aux/broctl && ( cd aux/broctl && make test )
+	test -d aux/btest && ( cd aux/btest && make test )
 configured:
 	@test -d $(BUILD) || ( echo "Error: No build/ directory found. Did you run configure?" && exit 1 )

NEWS (853 lines changed)

@@ -1,53 +1,259 @@
Release Notes
=============
This document summarizes the most important changes in the current Bro
release. For an exhaustive list of changes, see the ``CHANGES`` file
(note that submodules, such as BroControl and Broccoli, come with
their own ``CHANGES``.)

Bro 2.3
=======
[In progress]
Dependencies
------------
- Bro no longer requires a pre-installed libmagic (because it now
ships its own).
- Libmagic is no longer a dependency.
New Functionality
-----------------
- Support for GRE tunnel decapsulation, including enhanced GRE
headers. GRE tunnels are treated just like IP-in-IP tunnels by
parsing past the GRE header in between the delivery and payload IP
packets.
- The DNS analyzer now actually generates the dns_SRV_reply() event.
It had been documented before, yet was never raised.
- Bro now uses "file magic signatures" to identify file types. These
are defined via two new constructs in the signature rule parsing
grammar: "file-magic" gives a regular expression to match against,
and "file-mime" gives the MIME type string of content that matches
the magic and an optional strength value for the match. (See also
"Changed Functionality" below for changes due to switching from
using libmagic to such signatures.)
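For illustration only (the MIME type, strength value, and pattern here are
arbitrary), such a signature could look like::

    signature file-magic-pdf-example {
        file-mime "application/pdf", 80
        file-magic /^%PDF-/
    }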
- A new built-in function, "file_magic", can be used to get all file
magic matches and their corresponding strength against a given chunk
of data.
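A hypothetical call, assuming the returned matches carry ``strength`` and
``mime`` fields:

.. code:: bro

   event bro_init()
       {
       local matches = file_magic("%PDF-1.5 example data");

       for ( i in matches )
           print matches[i]$strength, matches[i]$mime;
       }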
Changed Functionality
---------------------
- string slices now exclude the end index (e.g., "123"[1:2] returns
"2"). Generally, Bro's string slices now behave similar to Python.
- ssl_client_hello() now receives a vector of ciphers, instead of a
set, to preserve their order.
- Notice::end_suppression() has been removed.
- Bro now parses X.509 extensions headers and, as a result, the
corresponding event got a new signature:
event x509_extension(c: connection, is_orig: bool, cert: X509, ext: X509_extension_info);
- Bro no longer special-cases SYN/FIN/RST-filtered traces by not
reporting missing data. The old behavior can be reverted by
redef'ing "detect_filtered_trace".
TODO: Update if we add a detector for filtered traces.
- We have removed the packet sorter component.
- Bro no longer uses libmagic to identify file types but instead now
comes with its own signature library (which initially is still
derived from libmagic's database). This leads to a number of further
changes with regards to MIME types:
* The second parameter of the "identify_data" built-in function
can no longer be used to get verbose file type descriptions,
though it can still be used to get the strongest matching file
magic signature.
* The "file_transferred" event's "descr" parameter no longer
contains verbose file type descriptions.
* The BROMAGIC environment variable no longer changes any behavior
in Bro as magic databases are no longer used/installed.
* Removed "binary" and "octet-stream" mime type detections. They
don't provide any more information than an uninitialized
mime_type field.
* The "fa_file" record now contains a "mime_types" field that
contains all magic signatures that matched the file content
(where the "mime_type" field is just a shortcut for the
strongest match).
Bro 2.2
=======
New Functionality
-----------------
- A completely overhauled intelligence framework for consuming
external intelligence data. It provides an abstracted mechanism
for feeding data into the framework to be matched against the
data available. It also provides a function named ``Intel::match``
which makes any hits on intelligence data available to the
scripting language.
Using the input framework, the intel framework can load data from
text files. It can also update and add data if changes are
made to the file being monitored. Files to monitor for
intelligence can be provided by redef-ing the
``Intel::read_files`` variable.
The intel framework is cluster-ready. On a cluster, the
manager is the only node that needs to load in data from disk,
the cluster support will distribute the data across a cluster
automatically.
Scripts are provided at ``policy/frameworks/intel/seen`` that
provide a broad set of sources of data to feed into the intel
framework to be matched.
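For example, a feed file could be registered like this (the path is
hypothetical):

.. code:: bro

   redef Intel::read_files += {
       "/usr/local/bro/share/bro/intel/my-feed.txt"
   };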
- A new file analysis framework moves most of the processing of file
content from script-land into the core, where it belongs. See
``doc/file-analysis.rst``, or the online documentation, for more
information.
Much of this is an internal change, but the framework also comes
with the following user-visible functionality (some of that was
already available before but is done differently, and more
efficiently, now):
- HTTP:
* Identify MIME type of messages.
* Extract messages to disk.
* Compute MD5 for messages.
- SMTP:
* Identify MIME type of messages.
* Extract messages to disk.
* Compute MD5 for messages.
* Provide access to start of entity data.
- FTP data transfers:
* Identify MIME types of data.
* Record to disk.
- IRC DCC transfers: Record to disk.
- Support for analyzing data transferred via HTTP range requests.
- A binary input reader interfaces the input framework with the
file analysis, allowing to inject files on disk into Bro's
content processing.
- A new framework for computing a wide array of summary statistics,
such as counters and thresholds checks, standard deviation and mean,
set cardinality, top K, and more. The framework operates in
real-time, independent of the underlying data, and can aggregate
information from many independent monitoring points (including
clusters). It provides a transparent, easy-to-use user interface,
and can optionally deploy a set of probabilistic data structures for
memory-efficient operation. The framework is located in
``scripts/base/frameworks/sumstats``.
A number of new applications now ship with Bro that are built on top
of the summary statistics framework:
* Scan detection: Detectors for port and address scans. See
``policy/misc/scan.bro`` (these scan detectors used to exist in
Bro versions <2.0; it's now back, but quite different).
* Tracerouter detector: ``policy/misc/detect-traceroute.bro``
* Web application detection/measurement:
``policy/misc/app-stats/*``
* FTP and SSH brute-forcing detector:
``policy/protocols/ftp/detect-bruteforcing.bro``,
``policy/protocols/ssh/detect-bruteforcing.bro``
* HTTP-based SQL injection detector:
``policy/protocols/http/detect-sqli.bro`` (existed before, but
now ported to the new framework)
- GridFTP support. This is an extension to the standard FTP analyzer
and includes:
- An analyzer for the GSI mechanism of GSSAPI FTP AUTH method.
GSI authentication involves an encoded TLS/SSL handshake over
the FTP control session. For FTP sessions that attempt GSI
authentication, the ``service`` field of the connection log
will include ``gridftp`` (as well as also ``ftp`` and
``ssl``).
- An example of a GridFTP data channel detection script. It
relies on the heuristics of GridFTP data channels commonly
default to SSL mutual authentication with a NULL bulk cipher
and that they usually transfer large datasets (default
threshold of script is 1 GB). For identified GridFTP data
channels, the ``services`` fields of the connection log will
include ``gridftp-data``.
- Modbus and DNP3 support. Script-level support is only basic at this
point but see ``src/analyzer/protocol/{modbus,dnp3}/events.bif``, or
the online documentation, for the events Bro generates. For Modbus,
there are also some example policies in
``policy/protocols/modbus/*``.
- The documentation now includes a new introduction to writing Bro
scripts. See ``doc/scripting/index.rst`` or, much better, the online
version. There's also the beginning of a chapter on "Using Bro" in
``doc/using/index.rst``.
- GPRS Tunnelling Protocol (GTPv1) decapsulation.

- The scripting language now provides "hooks", a new flavor of
functions that share characteristics of both standard functions and
events. They are like events in that multiple bodies can be defined
for the same hook identifier. They are more like functions in the
way they are invoked/called, because, unlike events, their execution
is immediate and they do not get scheduled through an event queue.
Also, a unique feature of a hook is that a given hook handler body
can short-circuit the execution of remaining hook handlers simply by
exiting from the body as a result of a ``break`` statement (as
opposed to a ``return`` or just reaching the end of the body). See
``doc/scripts/builtins.rst``, or the online documentation, for more
information.
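A minimal sketch of defining and invoking a hook (the hook name, bodies, and
priorities here are made up for illustration):

.. code:: bro

   global my_policy: hook(n: count);

   hook my_policy(n: count) &priority=5
       {
       if ( n == 0 )
           break;   # short-circuit: remaining bodies are skipped
       }

   hook my_policy(n: count)
       {
       print "second body saw", n;
       }

   event bro_init()
       {
       hook my_policy(0);   # only the first body runs
       hook my_policy(1);   # both bodies run
       }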
- Bro's language now has a working ``switch`` statement that generally
behaves like C-style switches (except that case labels can be
comprised of multiple literal constants delimited by commas). Only
atomic types are allowed for now. Case label bodies that don't
execute a ``return`` or ``break`` statement will fall through to
subsequent cases. A ``default`` case label is supported.
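A short, illustrative sketch:

.. code:: bro

   event bro_init()
       {
       local code = 404;

       switch ( code )
           {
           case 200, 201:
               print "success";
               break;
           case 404:
               print "not found";
               break;
           default:
               print "something else";
               break;
           }
       }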
- Bro's language now has a new set of types ``opaque of X``. Opaque
values can be passed around like other values but they can only be
manipulated with BiF functions, not with other operators. Currently,
the following opaque types are supported::

opaque of md5
opaque of sha1
opaque of sha256
opaque of cardinality
opaque of topk
opaque of bloomfilter

These go along with the corresponding BiF functions ``md5_*``,
``sha1_*``, ``sha256_*``, ``entropy_*``, etc. Note that where
these functions existed before, they have changed their signatures
to work with opaque types rather than global state.
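For instance, incremental hashing with an ``opaque of md5`` handle (the input
strings are arbitrary):

.. code:: bro

   event bro_init()
       {
       local h: opaque of md5 = md5_hash_init();
       md5_hash_update(h, "some");
       md5_hash_update(h, "data");
       print md5_hash_finish(h);   # hex digest of "somedata"
       }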
- The scripting language now supports constructing sets, tables,
vectors, and records by name::

type MyRecordType: record {
c: count;

@@ -60,114 +266,196 @@ New Functionality

global s = MySet([$c=1], [$c=2]);

- Strings now support the subscript operator to extract individual
characters and substrings (e.g., ``s[4]``, ``s[1:5]``). The index
expression can take up to two indices for the start and end index of
the substring to return (e.g. ``mystring[1:3]``).

- Functions now support default parameters, e.g.::

global foo: function(s: string, t: string &default="abc", u: count &default=0);

- Scripts can now use two new "magic constants" ``@DIR`` and
``@FILENAME`` that expand to the directory path of the current
script and just the script file name without path, respectively.
- ``ssl.log`` now also records the subject client and issuer
certificates.

- The ASCII writer can now output CSV files on a per filter basis.

- New SQLite reader and writer plugins for the logging framework allow
to read/write persistent data from on disk SQLite databases.

- New packet filter framework supports BPF-based load-balancing,
shunting, and sampling; plus plugin support to customize filters
dynamically.
- Bro now provides Bloom filters of two kinds: basic Bloom filters
supporting membership tests, and counting Bloom filters that track
the frequency of elements. The corresponding functions are::

bloomfilter_basic_init(fp: double, capacity: count, name: string &default=""): opaque of bloomfilter
bloomfilter_basic_init2(k: count, cells: count, name: string &default=""): opaque of bloomfilter
bloomfilter_counting_init(k: count, cells: count, max: count, name: string &default=""): opaque of bloomfilter
bloomfilter_add(bf: opaque of bloomfilter, x: any)
bloomfilter_lookup(bf: opaque of bloomfilter, x: any): count
bloomfilter_merge(bf1: opaque of bloomfilter, bf2: opaque of bloomfilter): opaque of bloomfilter
bloomfilter_clear(bf: opaque of bloomfilter)

See ``src/probabilistic/bloom-filter.bif``, or the online
documentation, for full documentation.
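A minimal usage sketch (the false-positive rate, capacity, and addresses are
arbitrary):

.. code:: bro

   event bro_init()
       {
       local bf = bloomfilter_basic_init(0.01, 1000);
       bloomfilter_add(bf, 1.2.3.4);
       bloomfilter_add(bf, 5.6.7.8);
       print bloomfilter_lookup(bf, 1.2.3.4);   # likely 1
       print bloomfilter_lookup(bf, 8.8.8.8);   # 0, barring a false positive
       }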
- Bro now provides a probabilistic data structure for computing
"top k" elements. The corresponding functions are::

topk_init(size: count): opaque of topk
topk_add(handle: opaque of topk, value: any)
topk_get_top(handle: opaque of topk, k: count)
topk_count(handle: opaque of topk, value: any): count
topk_epsilon(handle: opaque of topk, value: any): count
topk_size(handle: opaque of topk): count
topk_sum(handle: opaque of topk): count
topk_merge(handle1: opaque of topk, handle2: opaque of topk)
topk_merge_prune(handle1: opaque of topk, handle2: opaque of topk)
See ``src/probabilistic/top-k.bif``, or the online documentation,
for full documentation.
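A minimal usage sketch (the size and values are arbitrary):

.. code:: bro

   event bro_init()
       {
       local tk = topk_init(100);
       topk_add(tk, "a");
       topk_add(tk, "b");
       topk_add(tk, "a");
       print topk_get_top(tk, 2);   # the up to 2 most frequent values
       print topk_count(tk, "a");   # estimated count for "a"
       }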
- Bro now provides a probabilistic data structure for computing set
cardinality, using the HyperLogLog algorithm. The corresponding
functions are::
hll_cardinality_init(err: double, confidence: double): opaque of cardinality
hll_cardinality_add(handle: opaque of cardinality, elem: any): bool
hll_cardinality_merge_into(handle1: opaque of cardinality, handle2: opaque of cardinality): bool
hll_cardinality_estimate(handle: opaque of cardinality): double
hll_cardinality_copy(handle: opaque of cardinality): opaque of cardinality
See ``src/probabilistic/cardinality-counter.bif``, or the online
documentation, for full documentation.
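A minimal usage sketch (the error and confidence values are arbitrary):

.. code:: bro

   event bro_init()
       {
       local c = hll_cardinality_init(0.01, 0.95);
       hll_cardinality_add(c, 1.2.3.4);
       hll_cardinality_add(c, 5.6.7.8);
       hll_cardinality_add(c, 1.2.3.4);
       print hll_cardinality_estimate(c);   # roughly 2.0
       }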
- ``base/utils/exec.bro`` provides a module to start external
processes asynchronously and retrieve their output on termination.
``base/utils/dir.bro`` uses it to monitor a directory for changes,
and ``base/utils/active-http.bro`` for providing an interface for
querying remote web servers.
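For example, running an external command asynchronously could look like the
following sketch (it assumes ``Exec::run`` takes a command record via ``$cmd``
and that the result's ``stdout`` field is a vector of output lines):

.. code:: bro

   @load base/utils/exec

   event bro_init()
       {
       when ( local res = Exec::run([$cmd="/bin/echo hello"]) )
           {
           if ( res?$stdout )
               print res$stdout[0];   # "hello"
           }
       }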
- BroControl can now pin Bro processes to CPUs on supported platforms:
To use CPU pinning, a new per-node option ``pin_cpus`` can be
specified in node.cfg if the OS is either Linux or FreeBSD.
- BroControl now returns useful exit codes. Most BroControl commands
return 0 if everything was OK, and 1 otherwise. However, there are
a few exceptions. The "status" and "top" commands return 0 if all Bro
nodes are running, and 1 if not all nodes are running. The "cron"
command always returns 0 (but it still sends email if there were any
problems). Any command provided by a plugin always returns 0.
- BroControl now has an option "env_vars" to set Bro environment variables.
The value of this option is a comma-separated list of environment variable
assignments (e.g., "VAR1=value, VAR2=another"). The "env_vars" option
can apply to all Bro nodes (by setting it in broctl.cfg), or can be
node-specific (by setting it in node.cfg). Environment variables in
node.cfg have priority over any specified in broctl.cfg.
- BroControl now supports load balancing with PF_RING while sniffing
multiple interfaces. Rather than assigning the same PF_RING cluster ID
to all workers on a host, cluster ID assignment is now based on which
interface a worker is sniffing (i.e., all workers on a host that sniff
the same interface will share a cluster ID). This is handled by
BroControl automatically.
- BroControl has several new options: MailConnectionSummary (for
disabling the sending of connection summary report emails),
MailAlarmsInterval (for specifying a different interval to send alarm
summary emails), CompressCmd (if archived log files will be compressed,
this specifies the command that will be used to compress them),
CompressExtension (if archived log files will be compressed, this
specifies the file extension to use).
- BroControl comes with its own test-suite now. ``make test`` in
``aux/broctl`` will run it.
In addition to these, Bro 2.2 comes with a large set of smaller
extensions, tweaks, and fixes across the whole code base, including
most submodules.
Changed Functionality
---------------------

- Previous versions of ``$prefix/share/bro/site/local.bro`` (where
"$prefix" indicates the installation prefix of Bro), aren't compatible
with Bro 2.2. This file won't be overwritten when installing over a
previous Bro installation to prevent clobbering users' modifications,
but an example of the new version is located in
``$prefix/share/bro/site/local.bro.example``. So if no modification
has been done to the previous local.bro, just copy the new example
version over it, else merge in the differences. For reference,
a common error message when attempting to use an outdated local.bro
looks like::
fatal error in /usr/local/bro/share/bro/policy/frameworks/software/vulnerable.bro, line 41: BroType::AsRecordType (table/record) (set[record { min:record { major:count; minor:count; minor2:count; minor3:count; addl:string; }; max:record { major:count; minor:count; minor2:count; minor3:count; addl:string; }; }])
- The type of ``Software::vulnerable_versions`` changed to allow
more flexibility and range specifications. An example usage:
.. code:: bro
const java_1_6_vuln = Software::VulnerableVersionRange(
$max = Software::Version($major = 1, $minor = 6, $minor2 = 0, $minor3 = 44)
);
const java_1_7_vuln = Software::VulnerableVersionRange(
$min = Software::Version($major = 1, $minor = 7),
$max = Software::Version($major = 1, $minor = 7, $minor2 = 0, $minor3 = 20)
);
redef Software::vulnerable_versions += {
["Java"] = set(java_1_6_vuln, java_1_7_vuln)
};
- The interface to extracting content from application-layer protocols
(including HTTP, SMTP, FTP) has changed significantly due to the
introduction of the new file analysis framework (see above).
- Removed the following, already deprecated, functionality:
* Scripting language: * Scripting language:
- ``&disable_print_hook attribute``.

* BiF functions:

- ``parse_dotted_addr()``, ``dump_config()``,
``make_connection_persistent()``, ``generate_idmef()``,
``split_complete()``

- ``md5_*``, ``sha1_*``, ``sha256_*``, and ``entropy_*`` have
all changed their signatures to work with opaque types (see
above).

- Removed a now unused argument from ``do_split`` helper function.

- ``this`` is no longer a reserved keyword.

- The Input Framework's ``update_finished`` event has been renamed to
``end_of_data``. It will now not only fire after table-reads have
been completed, but also after the last event of a whole-file-read
(or whole-db-read, etc.).

- Renamed the option defining the frequency of alarm summary mails to
``Logging::default_alarm_mail_interval``. When using BroControl, the
value can now be set with the new broctl.cfg option
``MailAlarmsInterval``.

- We have completely rewritten the ``notice_policy`` mechanism. It now
no longer uses a record of policy items but a ``hook``, a new
language element that's roughly equivalent to a function with
multiple bodies (see above). For existing code, the two main changes
are:

- What used to be a ``redef`` of ``Notice::policy`` now becomes a
hook implementation. Example:

Old::

redef Notice::policy += {
[$pred(n: Notice::Info) = {
@@ -176,7 +464,7 @@ Changed Functionality

$action = Notice::ACTION_EMAIL]
};

New::

hook Notice::policy(n: Notice::Info)
{

@@ -184,38 +472,52 @@ Changed Functionality

add n$actions[Notice::ACTION_EMAIL];
}

- notice() is now likewise a hook, no longer an event. If you
have handlers for that event, you'll likely just need to change
the type accordingly. Example:

Old::

event notice(n: Notice::Info) { ... }

New::

hook notice(n: Notice::Info) { ... }

- The ``notice_policy.log`` is gone. That's a result of the new notice
policy setup.

- Removed the ``byte_len()`` and ``length()`` bif functions. Use the
``|...|`` operator instead.

- The ``SSH::Login`` notice has been superseded by a corresponding
intelligence framework observation (``SSH::SUCCESSFUL_LOGIN``).

- ``PacketFilter::all_packets`` has been replaced with
``PacketFilter::enable_auto_protocol_capture_filters``.

- We removed the BitTorrent DPD signatures pending further updates to
that analyzer.
- In previous versions of BroControl, running "broctl cron" would create
a file ``$prefix/logs/stats/www`` (where "$prefix" indicates the
installation prefix of Bro). Now, it is created as a directory.
Therefore, if you perform an upgrade install and you're using BroControl,
then you may see an email (generated by "broctl cron") containing an
error message: "error running update-stats". To fix this problem,
either remove that file (it is not needed) or rename it.
- Due to lack of maintenance the Ruby bindings for Broccoli are now
deprecated, and the build process no longer includes them by
default. For the time being, they can still be enabled by
configuring with ``--enable-ruby``, however we plan to remove
Broccoli's Ruby support with the next Bro release.
Bro 2.1
=======

New Functionality
-----------------

- Bro now comes with extensive IPv6 support. Past versions offered
only basic IPv6 functionality that was rarely used in practice as it

@@ -294,30 +596,9 @@ New Functionality

outputs. We do not yet recommend them for production (but welcome
feedback!)
- Summary statistics framework. [Extend]
- A number of new applications build on top of the summary statistics
framework:
* Scan detection: Detectors for port and address scans return. See
policy/misc/scan.bro.
* Tracerouter detector: policy/misc/detect-traceroute
* Web application detection/measurement: policy/misc/app-metrics.bro
* FTP brute-forcing detector: policy/protocols/ftp/detect-bruteforcing.bro
* HTTP-based SQL injection detector: policy/protocols/http/detect-sqli.bro
(existed before, but now ported to the new framework)
* SSH brute-forcing detector feeding the intelligence framework:
policy/protocols/ssh/detect-bruteforcing.bro
Changed Functionality
---------------------

The following summarizes the most important differences in existing
functionality. Note that this list is not complete, see CHANGES for

@@ -351,7 +632,10 @@ the full set.

soon. With that, "match" and "using" are no longer reserved keywords.

- The syntax for IPv6 literals changed from "2607:f8b0:4009:802::1012"
to "[2607:f8b0:4009:802::1012]". When an IP address variable or IP
address literal is enclosed in pipes (for example,
``|[fe80::db15]|``) the result is now the size of the address in
bits (32 for IPv4 and 128 for IPv6).
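For example (illustrative):

.. code:: bro

   event bro_init()
       {
       print |1.2.3.4|;        # 32
       print |[fe80::db15]|;   # 128
       }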
- Bro now spawns threads for doing its logging. From a user's
perspective not much should change, except that the OS may now show

@@ -391,60 +675,273 @@ the full set.

- The ASCII writers "header_*" options have been renamed to "meta_*"
(because there's now also a footer).
- Some built-in functions have been removed: "addr_to_count" (use
"addr_to_counts" instead), "bro_has_ipv6" (this is no longer
relevant because Bro now always supports IPv6), "active_connection"
(use "connection_exists" instead), and "connection_record" (use
"lookup_connection" instead).
- The "NFS3::mode2string" built-in function has been renamed to
"file_mode".
- Some built-in functions have been changed: "exit" (now takes the
exit code as a parameter), "to_port" (now takes a string as
parameter instead of a count and transport protocol, but
"count_to_port" is still available), "connect" (now takes an
additional string parameter specifying the zone of a non-global IPv6
address), and "listen" (now takes three additional parameters to
enable listening on IPv6 addresses).
- Some Bro script variables have been renamed:
"LogAscii::header_prefix" has been renamed to
"LogAscii::meta_prefix", "LogAscii::include_header" has been renamed
to "LogAscii::include_meta".
- Some Bro script variables have been removed: "tunnel_port",
"parse_udp_tunnels", "use_connection_compressor",
"cc_handle_resets", "cc_handle_only_syns", and
"cc_instantiate_on_data".
- A couple events have changed: the "icmp_redirect" event now includes
the target and destination addresses and any Neighbor Discovery
options in the message, and the last parameter of the
"dns_AAAA_reply" event has been removed because it was unused.
- The format of the ASCII log files has changed very slightly. Two
new lines are automatically added, one to record the time when the
log was opened, and the other to record the time when the log was
closed.
- In BroControl, the option (in broctl.cfg) "CFlowAddr" was renamed to
"CFlowAddress".
Bro 2.0
=======

As the version number jump from 1.5 suggests, Bro 2.0 is a major
upgrade and lots of things have changed. Most importantly, we have
rewritten almost all of Bro's default scripts from scratch, using
quite different structure now and focusing more on operational
deployment. The result is a system that works much better "out of the
box", even without much initial site-specific configuration. The
down-side is that 1.x configurations will need to be adapted to work
with the new version. The two rules of thumb are:

(1) If you have written your own Bro scripts
that do not depend on any of the standard scripts formerly
found in ``policy/``, they will most likely just keep working
(although you might want to adapt them to use some of the new
features, like the new logging framework; see below).

(2) If you have custom code that depends on specifics of 1.x
default scripts (including most configuration tuning), that is
unlikely to work with 2.x. We recommend to start by using just
the new scripts first, and then port over any customizations
incrementally as necessary (they may be much easier to do now,
or even unnecessary). Send mail to the Bro user mailing list
if you need help.

Below we summarize changes from 1.x to 2.x in more detail. This list
isn't complete, see the ``CHANGES`` file in the distribution
for the full story.
Script Organization
-------------------

In versions before 2.0, Bro scripts were all maintained in a flat
directory called ``policy/`` in the source tree. This directory is now
renamed to ``scripts/`` and contains major subdirectories ``base/``,
``policy/``, and ``site/``, each of which may also be subdivided
further.

The contents of the new ``scripts/`` directory, like the old/flat
``policy/`` still gets installed under the ``share/bro``
subdirectory of the installation prefix path just like previous
versions. For example, if Bro was compiled like ``./configure
--prefix=/usr/local/bro && make && make install``, then the script
hierarchy can be found in ``/usr/local/bro/share/bro``.

The main subdirectories of that hierarchy are as follows:

- ``base/`` contains all scripts that are loaded by Bro by default
(unless the ``-b`` command line option is used to run Bro in a
minimal configuration). Note that this is a major conceptual change:
rather than not loading anything by default, Bro now uses an
extensive set of default scripts out of the box.

The scripts under this directory generally either accumulate/log
useful state/protocol information for monitored traffic, configure a
default/recommended mode of operation, or provide extra Bro
scripting-layer functionality that has no significant performance cost.

- ``policy/`` contains all scripts that a user will need to explicitly
tell Bro to load. These are scripts that implement
functionality/analysis that not all users may want to use and may have
more significant performance costs. For a new installation, you
should go through these and see what appears useful to load.

- ``site/`` remains a directory that can be used to store locally
developed scripts. It now comes with some preinstalled example
scripts that contain recommended default configurations going beyond
the ``base/`` setup. E.g. ``local.bro`` loads extra scripts from
``policy/`` and does extra tuning. These files can be customized in
place without being overwritten by upgrades/reinstalls, unlike
scripts in other directories.

With version 2.0, the default ``BROPATH`` is set to automatically
search for scripts in ``policy/``, ``site/`` and their parent
directory, but **not** ``base/``. Generally, everything under
``base/`` is loaded automatically, but for users of the ``-b`` option,
it's important to know that loading a script in that directory
requires the extra ``base/`` path qualification. For example, the
following two scripts:

* ``$PREFIX/share/bro/base/protocols/ssl/main.bro``
* ``$PREFIX/share/bro/policy/protocols/ssl/validate-certs.bro``

are referenced from another Bro script like:
.. code:: bro
@load base/protocols/ssl/main
@load protocols/ssl/validate-certs
Notice how ``policy/`` can be omitted as a convenience in the second
case. ``@load`` can now also use relative path, e.g., ``@load
../main``.
Logging Framework
-----------------
- The logs generated by scripts that ship with Bro are entirely redone
to use a standardized, machine parsable format via the new logging
framework. Generally, the log content has been restructured towards
making it more directly useful to operations. Also, several
analyzers have been significantly extended and thus now log more
information. Take a look at ``ssl.log``.
* A particular format change that may be useful to note is that the
``conn.log`` ``service`` field is derived from DPD instead of
well-known ports (while that was already possible in 1.5, it was
not the default).
* Also, ``conn.log`` now reports raw number of packets/bytes per
endpoint.
- The new logging framework makes it possible to extend, customize,
and filter logs very easily.
- A common pattern found in the new scripts is to store logging stream
records for protocols inside the ``connection`` records so that
state can be collected until enough is seen to log a coherent unit
of information regarding the activity of that connection. This
  state is now frequently accessible in event handlers as
  ``c$<protocol>``, where ``<protocol>`` is replaced by the name of
  the protocol. This field is added to the ``connection`` record by
  ``redef``'ing it in a ``base/protocols/<protocol>/main.bro`` script
  (see the sketch after this list).
- The logging code has been rewritten internally, with the script-level
  interface and the output backends now clearly separated. While ASCII
  logging is still the default, we will add further output types in
  the future (binary format, direct database logging).
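As an illustrative sketch of the ``c$<protocol>`` pattern above (the
``Foo`` module, its fields, and the ``foo`` field name are hypothetical
placeholders, not a script shipped with Bro), such per-connection state
is typically attached like this:

.. code:: bro

    module Foo;

    export {
        ## Per-connection state that is eventually written to the log stream.
        type Info: record {
            ts:  time   &log;
            uid: string &log;
        };
    }

    # Make the state accessible in event handlers as c$foo.
    redef record connection += {
        foo: Info &optional;
    };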
Notice Framework
----------------
The way users interact with "notices" has changed significantly: it is
now easier to define a site policy, and the framework is more extensible
for adding customized actions.
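As a rough illustration (a sketch using the hook-based interface of
current releases; the exact notice API has evolved across the 2.x
series), a site policy might escalate one notice type to e-mail:

.. code:: bro

    hook Notice::policy(n: Notice::Info)
        {
        # E-mail SSH password-guessing notices in addition to logging them.
        if ( n$note == SSH::Password_Guessing )
            add n$actions[Notice::ACTION_EMAIL];
        }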
New Default Settings
--------------------
- Dynamic Protocol Detection (DPD) is now enabled/loaded by default.
- The default packet filter now examines all packets instead of
dynamically building a filter based on which protocol analysis scripts
  are loaded. See ``PacketFilter::all_packets`` for how to revert to the
  old behavior.
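For example, reverting to a dynamically built filter is a one-line
redefinition of that option (a minimal sketch):

.. code:: bro

    redef PacketFilter::all_packets = F;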
API Changes
-----------
- The ``@prefixes`` directive works differently now.
Any added prefixes are now searched for and loaded *after* all input
files have been parsed. After all input files are parsed, Bro
searches ``BROPATH`` for prefixed, flattened versions of all of the
parsed input files. For example, if ``lcl`` is in ``@prefixes``, and
``site.bro`` is loaded, then a file named ``lcl.site.bro`` that's in
``BROPATH`` would end up being automatically loaded as well. Packages
  work similarly, e.g. loading ``protocols/http`` means a file named
  ``lcl.protocols.http.bro`` in ``BROPATH`` gets loaded automatically
  (see the sketch after this list).
- The ``make_addr`` BIF now returns a ``subnet`` instead of an ``addr``.
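To illustrate the ``@prefixes`` behavior described above (``lcl`` and
``protocols/http`` are just the example values from the text):

.. code:: bro

    @prefixes += lcl
    @load protocols/http

    # After all input files are parsed, Bro additionally searches BROPATH
    # for "lcl.protocols.http.bro" and loads it automatically if found.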
Variable Naming
---------------
- ``Module`` is more widely used for namespacing. For example, the new
  ``site.bro`` exports the ``local_nets`` identifier (among other
  things) into the ``Site`` module (see the sketch after this list).
- Identifiers may have been renamed to conform to the new `scripting
  conventions
  <http://www.bro.org/development/howtos/script-conventions.html>`_.
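For instance, a site-specific script can add its own networks to the
``Site`` module (the subnets below are placeholders):

.. code:: bro

    redef Site::local_nets += { 10.0.0.0/8, 192.168.0.0/16 };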
Removed Functionality
---------------------
We have removed a number of features that were rarely used and/or
had not been maintained for a while:
- The ``net`` script data type.
- The ``alarm`` statement; use the notice framework instead.
- Trace rewriting.
- DFA state expiration in regexp engine.
- Active mapping.
- Native DAG support (may come back eventually).
- ClamAV support.
- The connection compressor is now disabled by default, and will
be removed in the future.
BroControl Changes
------------------
BroControl looks much like the version that came with Bro 1.x, but it
has been significantly cleaned up and streamlined internally.
BroControl has a new ``process`` command to process a trace on disk
offline using a similar configuration to what BroControl installs for
live analysis.
BroControl now has an extensive plugin interface for adding new
commands and options. Note that this is still considered experimental.
We have removed the ``analysis`` command, and BroControl currently
does not send daily alarm summaries (this may be restored later).
Development Infrastructure
--------------------------
Bro development has moved from using SVN to Git for revision control.
Users who want to run the latest Bro development snapshot by checking it
out from the source repositories should see the `development process
<http://www.bro.org/development/process.html>`_. Note that all the various
sub-components now reside in their own repositories. However, the
top-level Bro repository includes them as git submodules so it's easy
to check them all out simultaneously.
Bro now uses `CMake <http://www.cmake.org>`_ for its build system, so
CMake is now a required dependency when building from source.
Bro now comes with a growing suite of regression tests in
``testing/``.

10
README
View file

@ -8,11 +8,21 @@ and pointers for getting started. NEWS contains release notes for the
current version, and CHANGES has the complete history of changes. current version, and CHANGES has the complete history of changes.
Please see COPYING for licensing information. Please see COPYING for licensing information.
You can download source and binary releases on:
http://www.bro.org/download
To get the current development version, clone our master git
repository:
git clone --recursive git://git.bro.org/bro
For more documentation, research publications, and community contact For more documentation, research publications, and community contact
information, please see Bro's home page: information, please see Bro's home page:
http://www.bro.org http://www.bro.org
On behalf of the Bro Development Team, On behalf of the Bro Development Team,
Vern Paxson & Robin Sommer, Vern Paxson & Robin Sommer,

View file

@ -1 +1 @@
2.1-945 2.2-341

@ -1 +1 @@
Subproject commit 314fa8f65fc240e960c23c3bba98623436a72b98 Subproject commit b0877edc68af6ae08face528fc411c8ce21f2e30

@ -1 +1 @@
Subproject commit 91d258cc8b2f74cd02fc93dfe61f73ec9f0dd489 Subproject commit 3f86e2d5db2a0c5f2f104b15f359f4b752bb4558

@ -1 +1 @@
Subproject commit d59c73b6e0966ad63bbc63a35741b5f68263e7b1 Subproject commit 04e6a7f591817f060a781f21c12e1afce7eb1e16

@ -1 +1 @@
Subproject commit 52fd91261f41fa1528f7b964837a364d7991889e Subproject commit d99150801b7844e082b5421d1efe4050702d350e

@ -1 +1 @@
Subproject commit ce366206e3407e534a786ad572c342e9f9fef26b Subproject commit 4e2ec35917acb883c7d2ab19af487f3863c687ae

View file

@ -10,10 +10,4 @@
# BROPATH=`./bro-path-dev` ./src/bro # BROPATH=`./bro-path-dev` ./src/bro
# #
broPolicies=${BRO_SCRIPT_SOURCE_PATH}:${BRO_SCRIPT_SOURCE_PATH}/policy:${BRO_SCRIPT_SOURCE_PATH}/site echo .:${CMAKE_SOURCE_DIR}/scripts:${CMAKE_SOURCE_DIR}/scripts/policy:${CMAKE_SOURCE_DIR}/scripts/site:${CMAKE_BINARY_DIR}/scripts
broGenPolicies=${CMAKE_BINARY_DIR}/scripts
installedPolicies=${BRO_SCRIPT_INSTALL_PATH}:${BRO_SCRIPT_INSTALL_PATH}/site
echo .:$broPolicies:$broGenPolicies

2
cmake

@ -1 +1 @@
Subproject commit 026639f8368e56742c0cb5d9fb390ea64e60ec50 Subproject commit 0f301aa08a970150195a2ea5b3ed43d2d98b35b3

19
configure vendored
View file

@ -32,12 +32,13 @@ Usage: $0 [OPTION]... [VAR=VALUE]...
--enable-perftools force use of Google perftools on non-Linux systems --enable-perftools force use of Google perftools on non-Linux systems
(automatically on when perftools is present on Linux) (automatically on when perftools is present on Linux)
--enable-perftools-debug use Google's perftools for debugging --enable-perftools-debug use Google's perftools for debugging
--enable-jemalloc link against jemalloc
--enable-ruby build ruby bindings for broccoli (deprecated)
--disable-broccoli don't build or install the Broccoli library --disable-broccoli don't build or install the Broccoli library
--disable-broctl don't install Broctl --disable-broctl don't install Broctl
--disable-auxtools don't build or install auxiliary tools --disable-auxtools don't build or install auxiliary tools
--disable-perftools don't try to build with Google Perftools --disable-perftools don't try to build with Google Perftools
--disable-python don't try to build python bindings for broccoli --disable-python don't try to build python bindings for broccoli
--disable-ruby don't try to build ruby bindings for broccoli
--disable-dataseries don't use the optional DataSeries log writer --disable-dataseries don't use the optional DataSeries log writer
--disable-elasticsearch don't use the optional ElasticSearch log writer --disable-elasticsearch don't use the optional ElasticSearch log writer
@ -49,11 +50,12 @@ Usage: $0 [OPTION]... [VAR=VALUE]...
--with-flex=PATH path to flex executable --with-flex=PATH path to flex executable
--with-bison=PATH path to bison executable --with-bison=PATH path to bison executable
--with-perl=PATH path to perl executable --with-perl=PATH path to perl executable
--with-libmagic=PATH path to libmagic install root
Optional Packages in Non-Standard Locations: Optional Packages in Non-Standard Locations:
--with-libmagic=PATH path to libmagic install root
--with-geoip=PATH path to the libGeoIP install root --with-geoip=PATH path to the libGeoIP install root
--with-perftools=PATH path to Google Perftools install root --with-perftools=PATH path to Google Perftools install root
--with-jemalloc=PATH path to jemalloc install root
--with-python=PATH path to Python interpreter --with-python=PATH path to Python interpreter
--with-python-lib=PATH path to libpython --with-python-lib=PATH path to libpython
--with-python-inc=PATH path to Python headers --with-python-inc=PATH path to Python headers
@ -105,6 +107,7 @@ append_cache_entry BRO_ETC_INSTALL_DIR PATH $prefix/etc
append_cache_entry ENABLE_DEBUG BOOL false append_cache_entry ENABLE_DEBUG BOOL false
append_cache_entry ENABLE_PERFTOOLS BOOL false append_cache_entry ENABLE_PERFTOOLS BOOL false
append_cache_entry ENABLE_PERFTOOLS_DEBUG BOOL false append_cache_entry ENABLE_PERFTOOLS_DEBUG BOOL false
append_cache_entry ENABLE_JEMALLOC BOOL false
append_cache_entry BinPAC_SKIP_INSTALL BOOL true append_cache_entry BinPAC_SKIP_INSTALL BOOL true
append_cache_entry BUILD_SHARED_LIBS BOOL true append_cache_entry BUILD_SHARED_LIBS BOOL true
append_cache_entry INSTALL_AUX_TOOLS BOOL true append_cache_entry INSTALL_AUX_TOOLS BOOL true
@ -113,6 +116,7 @@ append_cache_entry INSTALL_BROCTL BOOL true
append_cache_entry CPACK_SOURCE_IGNORE_FILES STRING append_cache_entry CPACK_SOURCE_IGNORE_FILES STRING
append_cache_entry ENABLE_MOBILE_IPV6 BOOL false append_cache_entry ENABLE_MOBILE_IPV6 BOOL false
append_cache_entry DISABLE_PERFTOOLS BOOL false append_cache_entry DISABLE_PERFTOOLS BOOL false
append_cache_entry DISABLE_RUBY_BINDINGS BOOL true
# parse arguments # parse arguments
while [ $# -ne 0 ]; do while [ $# -ne 0 ]; do
@ -159,6 +163,9 @@ while [ $# -ne 0 ]; do
append_cache_entry ENABLE_PERFTOOLS BOOL true append_cache_entry ENABLE_PERFTOOLS BOOL true
append_cache_entry ENABLE_PERFTOOLS_DEBUG BOOL true append_cache_entry ENABLE_PERFTOOLS_DEBUG BOOL true
;; ;;
--enable-jemalloc)
append_cache_entry ENABLE_JEMALLOC BOOL true
;;
--disable-broccoli) --disable-broccoli)
append_cache_entry INSTALL_BROCCOLI BOOL false append_cache_entry INSTALL_BROCCOLI BOOL false
;; ;;
@ -174,8 +181,8 @@ while [ $# -ne 0 ]; do
--disable-python) --disable-python)
append_cache_entry DISABLE_PYTHON_BINDINGS BOOL true append_cache_entry DISABLE_PYTHON_BINDINGS BOOL true
;; ;;
--disable-ruby) --enable-ruby)
append_cache_entry DISABLE_RUBY_BINDINGS BOOL true append_cache_entry DISABLE_RUBY_BINDINGS BOOL false
;; ;;
--disable-dataseries) --disable-dataseries)
append_cache_entry DISABLE_DATASERIES BOOL true append_cache_entry DISABLE_DATASERIES BOOL true
@ -213,6 +220,10 @@ while [ $# -ne 0 ]; do
--with-perftools=*) --with-perftools=*)
append_cache_entry GooglePerftools_ROOT_DIR PATH $optarg append_cache_entry GooglePerftools_ROOT_DIR PATH $optarg
;; ;;
--with-jemalloc=*)
append_cache_entry JEMALLOC_ROOT_DIR PATH $optarg
append_cache_entry ENABLE_JEMALLOC BOOL true
;;
--with-python=*) --with-python=*)
append_cache_entry PYTHON_EXECUTABLE PATH $optarg append_cache_entry PYTHON_EXECUTABLE PATH $optarg
;; ;;

View file

@ -1 +0,0 @@
../CHANGES

View file

@ -1,75 +1,83 @@
set(BIF_SRC_DIR ${PROJECT_SOURCE_DIR}/src) set(BROCCOLI_DOCS_SRC ${CMAKE_BINARY_DIR}/aux/broccoli/doc/html)
set(RST_OUTPUT_DIR ${CMAKE_CURRENT_BINARY_DIR}/rest_output) set(BROCCOLI_DOCS_DST ${CMAKE_BINARY_DIR}/html/broccoli-api)
set(DOC_OUTPUT_DIR ${CMAKE_CURRENT_BINARY_DIR}/out) set(SPHINX_INPUT_DIR ${CMAKE_CURRENT_BINARY_DIR}/sphinx_input)
set(DOC_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}) set(SPHINX_OUTPUT_DIR ${CMAKE_CURRENT_BINARY_DIR}/sphinx_output)
set(DOC_SOURCE_WORKDIR ${CMAKE_CURRENT_BINARY_DIR}/sphinx-sources) set(BROXYGEN_SCRIPT_OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/broxygen_script_output)
set(BROXYGEN_CACHE_DIR ${CMAKE_CURRENT_BINARY_DIR}/broxygen_cache)
set(MASTER_POLICY_INDEX ${CMAKE_CURRENT_BINARY_DIR}/scripts/policy_index) # Find out what BROPATH to use when executing bro.
set(MASTER_PACKAGE_INDEX ${CMAKE_CURRENT_BINARY_DIR}/scripts/pkg_index) execute_process(COMMAND ${CMAKE_BINARY_DIR}/bro-path-dev
OUTPUT_VARIABLE BROPATH
RESULT_VARIABLE retval
OUTPUT_STRIP_TRAILING_WHITESPACE)
if (NOT ${retval} EQUAL 0)
message(FATAL_ERROR "Problem setting BROPATH")
endif ()
file(GLOB_RECURSE DOC_SOURCES FOLLOW_SYMLINKS "*") # Configure the Sphinx config file (expand variables CMake might know about).
# configure the Sphinx config file (expand variables CMake might know about)
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/conf.py.in configure_file(${CMAKE_CURRENT_SOURCE_DIR}/conf.py.in
${CMAKE_CURRENT_BINARY_DIR}/conf.py ${CMAKE_CURRENT_BINARY_DIR}/conf.py
@ONLY) @ONLY)
add_subdirectory(scripts) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/broxygen.conf.in
${CMAKE_CURRENT_BINARY_DIR}/broxygen.conf
@ONLY)
# The "broxygen" target generates reST documentation for any outdated bro add_custom_target(sphinxdoc
# scripts and then uses Sphinx to generate HTML documentation from the reST # Copy the template documentation to build directory to use as input tree
add_custom_target(broxygen # for Sphinx. This is needed because some parts are dynamically generated
# copy the template documentation to the build directory # in to that tree by Bro/Broxygen.
# to give as input for sphinx COMMAND rsync -q -r --copy-links --times --delete
COMMAND "${CMAKE_COMMAND}" -E copy_directory --filter='protect scripts/*'
${DOC_SOURCE_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/ ${SPHINX_INPUT_DIR}
${DOC_SOURCE_WORKDIR} # Use Bro/Broxygen to dynamically generate reST for all Bro scripts.
# copy generated policy script documentation into the COMMAND BROPATH=${BROPATH}
# working copy of the template documentation ${CMAKE_BINARY_DIR}/src/bro
COMMAND "${CMAKE_COMMAND}" -E copy_directory -X ${CMAKE_CURRENT_BINARY_DIR}/broxygen.conf
${RST_OUTPUT_DIR} broxygen >/dev/null
${DOC_SOURCE_WORKDIR}/scripts # Rsync over the generated reST to the Sphinx source tree in the build dir.
# append to the master index of all policy scripts COMMAND rsync -q -r --copy-links --times --delete --filter='protect *.bro'
COMMAND cat ${MASTER_POLICY_INDEX} >> ${BROXYGEN_SCRIPT_OUTPUT}/ ${SPHINX_INPUT_DIR}/scripts
${DOC_SOURCE_WORKDIR}/scripts/index.rst # Rsync over Bro scripts to the Sphinx source tree in the build dir.
# append to the master index of all policy packages # These are used by :download: references in the generated script docs.
COMMAND cat ${MASTER_PACKAGE_INDEX} >> COMMAND rsync -q -r --copy-links --times --delete
${DOC_SOURCE_WORKDIR}/scripts/packages.rst --filter='protect /base/bif/*' --filter='protect *.rst'
# construct a reST file for each group --filter='include */' --filter='include *.bro' --filter='exclude *'
COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/bin/group_index_generator.py ${CMAKE_SOURCE_DIR}/scripts/ ${SPHINX_INPUT_DIR}/scripts
${CMAKE_CURRENT_BINARY_DIR}/scripts/group_list # Rsync over Bro scripts created by BIF compiler to the Sphinx source tree.
${CMAKE_CURRENT_BINARY_DIR}/scripts COMMAND rsync -q -r --copy-links --times --delete
${DOC_SOURCE_WORKDIR}/scripts --filter='protect *.rst' --filter='include */'
# tell sphinx to generate html --filter='include *.bro' --filter='exclude *'
${CMAKE_BINARY_DIR}/scripts/base/bif/
${SPHINX_INPUT_DIR}/scripts/base/bif
# Use Sphinx to build HTML.
COMMAND sphinx-build COMMAND sphinx-build
-b html -b html
-c ${CMAKE_CURRENT_BINARY_DIR} -c ${CMAKE_CURRENT_BINARY_DIR}
-d ${DOC_OUTPUT_DIR}/doctrees -d ${SPHINX_OUTPUT_DIR}/doctrees
${DOC_SOURCE_WORKDIR} ${SPHINX_INPUT_DIR}
${DOC_OUTPUT_DIR}/html ${SPHINX_OUTPUT_DIR}/html
# create symlink to the html output directory for convenience # Create symlink to the html output directory for convenience.
COMMAND "${CMAKE_COMMAND}" -E create_symlink COMMAND "${CMAKE_COMMAND}" -E create_symlink
${DOC_OUTPUT_DIR}/html ${SPHINX_OUTPUT_DIR}/html
${CMAKE_BINARY_DIR}/html ${CMAKE_BINARY_DIR}/html
# copy Broccoli API reference into output dir if it exists # Copy Broccoli API reference into output dir if it exists.
COMMAND test -d ${CMAKE_BINARY_DIR}/aux/broccoli/doc/html && ( rm -rf ${CMAKE_BINARY_DIR}/html/broccoli-api && cp -r ${CMAKE_BINARY_DIR}/aux/broccoli/doc/html ${CMAKE_BINARY_DIR}/html/broccoli-api ) || true COMMAND test -d ${BROCCOLI_DOCS_SRC} &&
( rm -rf ${BROCCOLI_DOCS_DST} &&
cp -r ${BROCCOLI_DOCS_SRC} ${BROCCOLI_DOCS_DST} ) || true
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
COMMENT "[Sphinx] Generating HTML policy script docs" COMMENT "[Sphinx] Generate HTML documentation in ${CMAKE_BINARY_DIR}/html")
# SOURCES just adds stuff to IDE projects as a convenience
SOURCES ${DOC_SOURCES})
# The "sphinxclean" target removes just the Sphinx input/output directories add_dependencies(sphinxdoc bro)
# from the build directory.
add_custom_target(broxygenclean add_custom_target(sphinxdoc_clean
COMMAND "${CMAKE_COMMAND}" -E remove_directory COMMAND "${CMAKE_COMMAND}" -E remove_directory ${SPHINX_INPUT_DIR}
${DOC_SOURCE_WORKDIR} COMMAND "${CMAKE_COMMAND}" -E remove_directory ${SPHINX_OUTPUT_DIR}
COMMAND "${CMAKE_COMMAND}" -E remove_directory COMMAND "${CMAKE_COMMAND}" -E remove_directory ${BROXYGEN_SCRIPT_OUTPUT}
${DOC_OUTPUT_DIR} COMMAND "${CMAKE_COMMAND}" -E remove_directory ${BROXYGEN_CACHE_DIR}
VERBATIM) VERBATIM)
add_dependencies(broxygen broxygenclean restdoc)
add_custom_target(doc) add_custom_target(doc)
add_custom_target(docclean) add_custom_target(docclean)
add_dependencies(doc broxygen) add_dependencies(doc sphinxdoc)
add_dependencies(docclean broxygenclean restclean) add_dependencies(docclean sphinxdoc_clean)

View file

@ -1 +0,0 @@
../INSTALL

5
doc/LICENSE Normal file
View file

@ -0,0 +1,5 @@
This work is licensed under the Creative Commons
Attribution-NonCommercial 3.0 Unported License. To view a copy of this
license, visit http://creativecommons.org/licenses/by-nc/3.0/ or send
a letter to Creative Commons, 444 Castro Street, Suite 900, Mountain
View, California, 94041, USA.

View file

@ -10,22 +10,22 @@ common/general documentation, style sheets, JavaScript, etc. The Sphinx
config file is produced from ``conf.py.in``, and can be edited to change config file is produced from ``conf.py.in``, and can be edited to change
various Sphinx options. various Sphinx options.
There is also a custom Sphinx domain implemented in ``source/ext/bro.py`` There is also a custom Sphinx domain implemented in ``ext/bro.py``
which adds some reST directives and roles that aid in generating useful which adds some reST directives and roles that aid in generating useful
index entries and cross-references. Other extensions can be added in index entries and cross-references. Other extensions can be added in
a similar fashion. a similar fashion.
Either the ``make doc`` or ``make broxygen`` targets in the top-level The ``make doc`` target in the top-level Makefile can be used to locally
Makefile can be used to locally render the reST files into HTML. render the reST files into HTML. That target depends on:
Those targets depend on:
* Python interpreter >= 2.5 * Python interpreter >= 2.5
* `Sphinx <http://sphinx.pocoo.org/>`_ >= 1.0.1 * `Sphinx <http://sphinx-doc.org/>`_ >= 1.0.1
* Doxygen (required only for building the Broccoli API doc)
After completion, HTML documentation is symlinked in ``build/html``. After completion, HTML documentation is symlinked in ``build/html``.
There's also ``make docclean`` and ``make broxygenclean`` targets to There's also a ``make docclean`` target which deletes any files
clean the resulting documentation. created during the documentation build process.
Notes for Writing Documentation Notes for Writing Documentation
------------------------------- -------------------------------

View file

@ -439,8 +439,17 @@ td.linenos pre {
color: #aaa; color: #aaa;
} }
.highlight-guess {
overflow:auto;
}
.highlight-none {
overflow:auto;
}
table.highlighttable { table.highlighttable {
margin-left: 0.5em; margin-left: 0.5em;
overflow:scroll;
} }
table.highlighttable td { table.highlighttable td {

View file

@ -150,8 +150,14 @@ sup, sub {
line-height:0; line-height:0;
} }
pre { pre, code {
white-space: pre; white-space: pre;
overflow: auto;
margin-left: 2em;
margin-right: 2em;
margin-top: .5em;
margin-bottom: 1.5em;
word-wrap: normal;
} }
pre, code, tt { pre, code, tt {
@ -166,6 +172,10 @@ dl dt {
font-weight: bold; font-weight: bold;
} }
li dl dt {
font-weight: normal;
}
dd { dd {
margin:0 0 20px 20px; margin:0 0 20px 20px;
} }
@ -174,6 +184,16 @@ small {
font-size:75%; font-size:75%;
} }
.small-text {
font-size:75%;
}
.align-center {
display: block;
margin-left: auto;
margin-right: auto;
}
a:link, a:link,
a:visited, a:visited,
a:active a:active
@ -435,3 +455,31 @@ li {
margin-bottom: .5em; margin-bottom: .5em;
margin-top: 0em; margin-top: 0em;
} }
.btest-cmd .hll {
font-weight: bold;
background: #FFFAE2;
}
.btest-include .hll {
display: block;
text-align: center;
font-family: Palatino;
background: #FFFAE2;
}
.btest-include .hll * {
color: #aaa;
}
.linenodiv pre {
margin-left: 0px;
margin-right: 0px;
width: 1.5em;
text-align: right;
background: #000;
}
.btest-cmd .code pre, .btest-include .code pre {
margin-left: 0px;
}

View file

@ -1,62 +0,0 @@
#! /usr/bin/env python
# This script automatically generates a reST documents that lists
# a collection of Bro scripts that are "grouped" together.
# The summary text (##! comments) of the script is embedded in the list
#
# 1st argument is the file containing list of groups
# 2nd argument is the directory containing ${group}_files lists of
# scripts that belong to the group and ${group}_doc_names lists of
# document names that can be supplied to a reST :doc: role
# 3rd argument is a directory in which write a ${group}.rst file (will
# append to existing file) that contains reST style references to
# script docs along with summary text contained in original script
import sys
import os
import string
group_list = sys.argv[1]
file_manifest_dir = sys.argv[2]
output_dir = sys.argv[3]
with open(group_list, 'r') as f_group_list:
for group in f_group_list.read().splitlines():
#print group
file_manifest = os.path.join(file_manifest_dir, group + "_files")
doc_manifest = os.path.join(file_manifest_dir, group + "_doc_names")
src_files = []
doc_names = []
with open(file_manifest, 'r') as f_file_manifest:
src_files = f_file_manifest.read().splitlines()
with open(doc_manifest, 'r') as f_doc_manifest:
doc_names = f_doc_manifest.read().splitlines()
for i in range(len(src_files)):
src_file = src_files[i]
#print "\t" + src_file
summary_comments = []
with open(src_file, 'r') as f_src_file:
for line in f_src_file:
sum_pos = string.find(line, "##!")
if sum_pos != -1:
summary_comments.append(line[(sum_pos+3):])
#print summary_comments
group_file = os.path.join(output_dir, group + ".rst")
if not os.path.exists(group_file):
if not os.path.exists(os.path.dirname(group_file)):
os.makedirs(os.path.dirname(group_file))
with open(group_file, 'w') as f_group_file:
f_group_file.write(":orphan:\n\n")
title = "Package Index: %s\n" % os.path.dirname(group)
f_group_file.write(title);
for n in range(len(title)):
f_group_file.write("=")
f_group_file.write("\n");
with open(group_file, 'a') as f_group_file:
f_group_file.write("\n:doc:`/scripts/%s`\n" % doc_names[i])
for line in summary_comments:
f_group_file.write(" " + line)

79
doc/broids/index.rst Normal file
View file

@ -0,0 +1,79 @@
.. _bro-ids:
=======
Bro IDS
=======
An Intrusion Detection System (IDS) allows you to detect suspicious
activities happening on your network as a result of a past or active
attack. Because of its programming capabilities, Bro can easily be
configured to behave like traditional IDSs and detect common attacks
with well known patterns, or you can create your own scripts to detect
conditions specific to your particular case.
In the following sections, we present a few examples of common uses of
Bro as an IDS.
-------------------------------------------------
Detecting an FTP Brute-force Attack and Notifying
-------------------------------------------------
For the purpose of this exercise, we define FTP brute-forcing as too many
rejected usernames and passwords occurring from a single address. We
start by defining a threshold for the number of attempts, a monitoring
interval (in minutes), and a new notice type.
.. btest-include:: ${BRO_SRC_ROOT}/scripts/policy/protocols/ftp/detect-bruteforcing.bro
:lines: 9-25
Using the ``ftp_reply`` event, we check for error codes from the `500
series <http://en.wikipedia.org/wiki/List_of_FTP_server_return_codes>`_
for the "USER" and "PASS" commands, representing rejected usernames or
passwords. For this, we can use the :bro:see:`FTP::parse_ftp_reply_code`
function to break down the reply code and check if the first digit is a
"5" or not. If true, we then use the :ref:`Summary Statistics Framework
<sumstats-framework>` to keep track of the number of failed attempts.
.. btest-include:: ${BRO_SRC_ROOT}/scripts/policy/protocols/ftp/detect-bruteforcing.bro
:lines: 52-60
Next, we use the SumStats framework to raise a notice of the attack when
the number of failed attempts exceeds the specified threshold during the
measuring interval.
.. btest-include:: ${BRO_SRC_ROOT}/scripts/policy/protocols/ftp/detect-bruteforcing.bro
:lines: 28-50
Below is the final code for our script.
.. btest-include:: ${BRO_SRC_ROOT}/scripts/policy/protocols/ftp/detect-bruteforcing.bro
.. btest:: ftp-bruteforce
@TEST-EXEC: btest-rst-cmd bro -r ${TRACES}/ftp/bruteforce.pcap protocols/ftp/detect-bruteforcing.bro
@TEST-EXEC: btest-rst-include notice.log
As a final note, the :doc:`detect-bruteforcing.bro
</scripts/policy/protocols/ftp/detect-bruteforcing.bro>` script above is
included with Bro out of the box. Use this feature by loading this script
during startup.
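For example (typically added to ``local.bro``):

.. code:: bro

    @load protocols/ftp/detect-bruteforcing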
-------------
Other Attacks
-------------
Detecting SQL Injection Attacks
-------------------------------
Checking files against known malware hashes
-------------------------------------------
Files transmitted on your network could either be completely harmless or
contain viruses and other threats. One possible action against this
threat is to compute the hashes of the files and compare them against a
list of known malware hashes. Bro simplifies this task by offering a
:doc:`detect-MHR.bro </scripts/policy/frameworks/files/detect-MHR.bro>`
script that creates and compares hashes against the `Malware Hash
Registry <https://www.team-cymru.org/Services/MHR/>`_ maintained by Team
Cymru. Use this feature by loading this script during startup.
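For example (again, typically added to ``local.bro``):

.. code:: bro

    @load frameworks/files/detect-MHR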

1
doc/broxygen.conf.in Normal file
View file

@ -0,0 +1 @@
script * @BROXYGEN_SCRIPT_OUTPUT@/

View file

@ -1,84 +0,0 @@
Bro Cluster
===========
Intro
------
Bro is not multithreaded, so once the limitations of a single processor core are reached, the only option currently is to spread the workload across many cores or even many physical computers. The cluster deployment scenario for Bro is the current solution to build these larger systems. The accompanying tools and scripts provide the structure to easily manage many Bro processes examining packets and doing correlation activities but acting as a singular, cohesive entity.
Architecture
---------------
The figure below illustrates the main components of a Bro cluster.
.. image:: images/deployment.png
Tap
***
This is a mechanism that splits the packet stream in order to make a copy
available for inspection. Examples include the monitoring port on a switch and
an optical splitter for fiber networks.
Frontend
********
This is a discrete hardware device or on-host technique that will split your traffic into many streams or flows. The Bro binary does not do this job. There are numerous ways to accomplish this task, some of which are described below in `Frontend Options`_.
Manager
*******
This is a Bro process which has two primary jobs. It receives log messages and notices from the rest of the nodes in the cluster using the Bro communications protocol. The result is that you will end up with single logs for each log instead of many discrete logs that you have to later combine in some manner with post processing. The manager also takes the opportunity to de-duplicate notices and it has the ability to do so since its acting as the choke point for notices and how notices might be processed into actions such as emailing, paging, or blocking.
The manager process is started first by BroControl and it only opens its designated port and waits for connections, it doesnt initiate any connections to the rest of the cluster. Once the workers are started and connect to the manager, logs and notices will start arriving to the manager process from the workers.
Proxy
*****
This is a Bro process which manages synchronized state. Variables can be synchronized across connected Bro processes automatically in Bro and proxies will help the workers by alleviating the need for all of the workers to connect directly to each other.
Examples of synchronized state from the scripts that ship with Bro are things such as the full list of “known” hosts and services which are hosts or services which have been detected as performing full TCP handshakes or an analyzed protocol has been found on the connection. If worker A detects host 1.2.3.4 as an active host, it would be beneficial for worker B to know that as well so worker A shares that information as an insertion to a set <link to set documentation would be good here> which travels to the clusters proxy and the proxy then sends that same set insertion to worker B. The result is that worker A and worker B have shared knowledge about host and services that are active on the network being monitored.
The proxy model extends to having multiple proxies as well if necessary for performance reasons, it only adds one additional step for the Bro processes. Each proxy connects to another proxy in a ring and the workers are shared between them as evenly as possible. When a proxy receives some new bit of state, it will share that with its proxy which is then shared around the ring of proxies and down to all of the workers. From a practical standpoint, there are no rules of thumb established yet for the number of proxies necessary for the number of workers they are serving. Best is to start with a single proxy and add more if communication performance problems are found.
Bro processes acting as proxies dont tend to be extremely intense to CPU or memory and users frequently run proxy processes on the same physical host as the manager.
Worker
******
This is the Bro process that sniffs network traffic and does protocol analysis on the reassembled traffic streams. Most of the work of an active cluster takes place on the workers and as such, the workers typically represent the bulk of the Bro processes that are running in a cluster. The fastest memory and CPU core speed you can afford is best here since all of the protocol parsing and most analysis will take place here. There are no particular requirements for the disks in workers since almost all logging is done remotely to the manager and very little is normally written to disk.
The rule of thumb we have followed recently is to allocate approximately 1 core for every 80Mbps of traffic that is being analyzed, however this estimate could be extremely traffic mix specific. It has generally worked for mixed traffic with many users and servers. For example, if your traffic peaks around 2Gbps (combined) and you want to handle traffic at peak load, you may want to have 26 cores available (2048 / 80 == 25.6). If the 80Mbps estimate works for your traffic, this could be handled by 3 physical hosts dedicated to being workers with each one containing dual 6-core processors.
Once a flow based load balancer is put into place this model is extremely easy to scale as well so its recommended that you guess at the amount of hardware you will need to fully analyze your traffic. If it turns out that you need more, its relatively easy to increase the size of the cluster in most cases.
Frontend Options
----------------
There are many options for setting up a frontend flow distributor and in many cases it may even be beneficial to do multiple stages of flow distribution on the network and on the host.
Discrete hardware flow balancers
********************************
cPacket
^^^^^^^
If you are monitoring one or more 10G physical interfaces, the recommended solution is to use either a cFlow or cVu device from cPacket because they are currently being used very successfully at a number of sites. These devices will perform layer-2 load balancing by rewriting the destination ethernet MAC address to cause each packet associated with a particular flow to have the same destination MAC. The packets can then be passed directly to a monitoring host where each worker has a BPF filter to limit its visibility to only that stream of flows or onward to a commodity switch to split the traffic out to multiple 1G interfaces for the workers. This can ultimately greatly reduce costs since workers can use relatively inexpensive 1G interfaces.
OpenFlow Switches
^^^^^^^^^^^^^^^^^
We are currently exploring the use of OpenFlow based switches to do flow based load balancing directly on the switch which can greatly reduce frontend costs for many users. This document will be updated when we have more information.
On host flow balancing
**********************
PF_RING
^^^^^^^
The PF_RING software for Linux has a “clustering” feature which will do flow based load balancing across a number of processes that are sniffing the same interface. This will allow you to easily take advantage of multiple cores in a single physical host because Bros main event loop is single threaded and cant natively utilize all of the cores. More information about Bro with PF_RING can be found here: (someone want to write a quick Bro/PF_RING tutorial to link to here? document installing kernel module, libpcap wrapper, building Bro with the --with-pcap configure option)
Netmap
^^^^^^
FreeBSD has an in-progress project named Netmap which will enable flow based load balancing as well. When it becomes viable for real world use, this document will be updated.
Click! Software Router
^^^^^^^^^^^^^^^^^^^^^^
Click! can be used for flow based load balancing with a simple configuration. (link to an example for the config). This solution is not recommended on Linux due to Bros PF_RING support and only as a last resort on other operating systems since it causes a lot of overhead due to context switching back and forth between kernel and userland several times per packet.

172
doc/cluster/index.rst Normal file
View file

@ -0,0 +1,172 @@
========================
Bro Cluster Architecture
========================
Bro is not multithreaded, so once the limitations of a single processor core
are reached the only option currently is to spread the workload across many
cores, or even many physical computers. The cluster deployment scenario for
Bro is the current solution to build these larger systems. The tools and
scripts that accompany Bro provide the structure to easily manage many Bro
processes examining packets and doing correlation activities but acting as
a singular, cohesive entity. This document describes the Bro cluster
architecture. For information on how to configure a Bro cluster,
see the documentation for
:doc:`BroControl <../components/broctl/README>`.
Architecture
---------------
The figure below illustrates the main components of a Bro cluster.
.. image:: /images/deployment.png
Tap
***
The tap is a mechanism that splits the packet stream in order to make a copy
available for inspection. Examples include the monitoring port on a switch
and an optical splitter on fiber networks.
Frontend
********
The frontend is a discrete hardware device or on-host technique that splits
traffic into many streams or flows. The Bro binary does not do this job.
There are numerous ways to accomplish this task, some of which are described
below in `Frontend Options`_.
Manager
*******
The manager is a Bro process that has two primary jobs. It receives log
messages and notices from the rest of the nodes in the cluster using the Bro
communications protocol. The result is a single log instead of many
discrete logs that you have to combine in some manner with post-processing.
The manager also takes the opportunity to de-duplicate notices, and it has the
ability to do so since it's acting as the choke point for notices and how
notices might be processed into actions (e.g., emailing, paging, or blocking).
The manager process is started first by BroControl and it only opens its
designated port and waits for connections, it doesn't initiate any
connections to the rest of the cluster. Once the workers are started and
connect to the manager, logs and notices will start arriving to the manager
process from the workers.
Proxy
*****
The proxy is a Bro process that manages synchronized state. Variables can
be synchronized across connected Bro processes automatically. Proxies help
the workers by alleviating the need for all of the workers to connect
directly to each other.
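As a small script-level sketch of what such synchronized state looks
like (the variable name is only illustrative):

.. code:: bro

    # Updates to this set are propagated to all connected peers.
    global known_active_hosts: set[addr] &synchronized;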
Examples of synchronized state from the scripts that ship with Bro include
the full list of "known" hosts and services, i.e., hosts or services
identified as having completed a full TCP handshake or on which an
analyzed protocol has been found.
it would be beneficial for worker B to know that as well. So worker A shares
that information as an insertion to a set which travels to the cluster's
proxy and the proxy sends that same set insertion to worker B. The result
is that worker A and worker B have shared knowledge about host and services
that are active on the network being monitored.
The proxy model extends to having multiple proxies when necessary for
performance reasons. It only adds one additional step for the Bro processes.
Each proxy connects to another proxy in a ring and the workers are shared
between them as evenly as possible. When a proxy receives some new bit of
state it will share that with its proxy, which is then shared around the
ring of proxies, and down to all of the workers. From a practical standpoint,
there are no rules of thumb established for the number of proxies
necessary for the number of workers they are serving. It is best to start
with a single proxy and add more if communication performance problems are
found.
Bro processes acting as proxies don't tend to be extremely hard on CPU
or memory and users frequently run proxy processes on the same physical
host as the manager.
Worker
******
The worker is the Bro process that sniffs network traffic and does protocol
analysis on the reassembled traffic streams. Most of the work of an active
cluster takes place on the workers and as such, the workers typically
represent the bulk of the Bro processes that are running in a cluster.
The fastest memory and CPU core speed you can afford is recommended
since all of the protocol parsing and most analysis will take place here.
There are no particular requirements for the disks in workers since almost all
logging is done remotely to the manager, and normally very little is written
to disk.
The rule of thumb we have followed recently is to allocate approximately 1
core for every 80Mbps of traffic that is being analyzed. However, this
estimate could be extremely traffic mix-specific. It has generally worked
for mixed traffic with many users and servers. For example, if your traffic
peaks around 2Gbps (combined) and you want to handle traffic at peak load,
you may want to have 26 cores available (2048 / 80 == 25.6). If the 80Mbps
estimate works for your traffic, this could be handled by 3 physical hosts
dedicated to being workers with each one containing dual 6-core processors.
Once a flow-based load balancer is put into place this model is extremely
easy to scale. It is recommended that you estimate the amount of
hardware you will need to fully analyze your traffic. If more is needed it's
relatively easy to increase the size of the cluster in most cases.
Frontend Options
----------------
There are many options for setting up a frontend flow distributor. In many
cases it is beneficial to do multiple stages of flow distribution
on the network and on the host.
Discrete hardware flow balancers
********************************
cPacket
^^^^^^^
If you are monitoring one or more 10G physical interfaces, the recommended
solution is to use either a cFlow or cVu device from cPacket because they
are used successfully at a number of sites. These devices will perform
layer-2 load balancing by rewriting the destination Ethernet MAC address
to cause each packet associated with a particular flow to have the same
destination MAC. The packets can then be passed directly to a monitoring
host where each worker has a BPF filter to limit its visibility to only that
stream of flows, or onward to a commodity switch to split the traffic out to
multiple 1G interfaces for the workers. This greatly reduces
costs since workers can use relatively inexpensive 1G interfaces.
OpenFlow Switches
^^^^^^^^^^^^^^^^^
We are currently exploring the use of OpenFlow based switches to do flow-based
load balancing directly on the switch, which greatly reduces frontend
costs for many users. This document will be updated when we have more
information.
On host flow balancing
**********************
PF_RING
^^^^^^^
The PF_RING software for Linux has a "clustering" feature which will do
flow-based load balancing across a number of processes that are sniffing the
same interface. This allows you to easily take advantage of multiple
cores in a single physical host because Bro's main event loop is single
threaded and can't natively utilize all of the cores. If you want to use
PF_RING, see the documentation on `how to configure Bro with PF_RING
<http://bro.org/documentation/load-balancing.html>`_.
Netmap
^^^^^^
FreeBSD has an in-progress project named Netmap which will enable flow-based
load balancing as well. When it becomes viable for real world use, this
document will be updated.
Click! Software Router
^^^^^^^^^^^^^^^^^^^^^^
Click! can be used for flow-based load balancing with a simple configuration.
This solution is not recommended on
Linux due to Bro's PF_RING support and only as a last resort on other
operating systems since it causes a lot of overhead due to context switching
back and forth between kernel and userland several times per packet.

28
doc/components/index.rst Normal file
View file

@ -0,0 +1,28 @@
=============
Subcomponents
=============
The following are snapshots of documentation for components that come
with this version of Bro (|version|). Since they can also be used
independently, see the `download page
<http://bro.org/download/index.html>`_ for documentation of any
current, independent component releases.
.. toctree::
:maxdepth: 1
BinPAC - A protocol parser generator <binpac/README>
Broccoli - The Bro Client Communication Library (README) <broccoli/README>
Broccoli - User Manual <broccoli/broccoli-manual>
Broccoli Python Bindings <broccoli-python/README>
Broccoli Ruby Bindings <broccoli-ruby/README>
BroControl - Interactive Bro management shell <broctl/README>
Bro-Aux - Small auxiliary tools for Bro <bro-aux/README>
BTest - A unit testing framework <btest/README>
Capstats - Command-line packet statistic tool <capstats/README>
PySubnetTree - Python module for CIDR lookups <pysubnettree/README>
trace-summary - Script for generating breakdowns of network traffic <trace-summary/README>
The `Broccoli API Reference <../broccoli-api/index.html>`_ may also be of
interest.

View file

@ -12,10 +12,34 @@
import sys, os import sys, os
extensions = []
# If extensions (or modules to document with autodoc) are in another directory, # If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the # add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here. # documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('sphinx-sources/ext')) sys.path.insert(0, os.path.abspath('sphinx_input/ext'))
# ----- Begin of BTest configuration. -----
btest = os.path.abspath("@CMAKE_SOURCE_DIR@/aux/btest")
brocut = os.path.abspath("@CMAKE_SOURCE_DIR@/aux/bro-aux/bro-cut")
bro = os.path.abspath("@CMAKE_SOURCE_DIR@/build/src")
os.environ["PATH"] += (":%s:%s/sphinx:%s:%s" % (btest, btest, bro, brocut))
sys.path.append(os.path.join(btest, "sphinx"))
extensions += ["btest-sphinx"]
btest_base="@CMAKE_SOURCE_DIR@/testing/btest"
btest_tests="doc/sphinx"
# ----- End of BTest configuration. -----
# ----- Begin of Broxygen configuration. -----
extensions += ["broxygen"]
bro_binary = os.path.abspath("@CMAKE_SOURCE_DIR@/build/src/bro")
broxygen_cache="@BROXYGEN_CACHE_DIR@"
os.environ["BROPATH"] = "@BROPATH@"
os.environ["BROMAGIC"] = "@BROMAGIC@"
# ----- End of Broxygen configuration. -----
# -- General configuration ----------------------------------------------------- # -- General configuration -----------------------------------------------------
@ -24,10 +48,13 @@ sys.path.insert(0, os.path.abspath('sphinx-sources/ext'))
# Add any Sphinx extension module names here, as strings. They can be extensions # Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones. # coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['bro', 'rst_directive', 'sphinx.ext.todo', 'adapt-toc'] extensions += ['bro', 'rst_directive', 'sphinx.ext.todo', 'adapt-toc']
os.environ["BRO_SRC_ROOT"] = "@CMAKE_SOURCE_DIR@"
os.environ["DOC_ROOT"] = "@CMAKE_SOURCE_DIR@/doc"
# Add any paths that contain templates here, relative to this directory. # Add any paths that contain templates here, relative to this directory.
templates_path = ['sphinx-sources/_templates', 'sphinx-sources/_static'] templates_path = ['sphinx_input/_templates', 'sphinx_input/_static']
# The suffix of source filenames. # The suffix of source filenames.
source_suffix = '.rst' source_suffix = '.rst'
@ -40,7 +67,7 @@ master_doc = 'index'
# General information about the project. # General information about the project.
project = u'Bro' project = u'Bro'
copyright = u'2012, The Bro Project' copyright = u'2013, The Bro Project'
# The version info for the project you're documenting, acts as replacement for # The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the # |version| and |release|, also used in various other places throughout the
@ -63,7 +90,7 @@ today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and # List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files. # directories to ignore when looking for source files.
exclude_patterns = [] exclude_patterns = [".#*"]
# The reST default role (used for this markup: `text`) to use for all documents. # The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None #default_role = None
@ -121,7 +148,7 @@ html_theme_options = { }
# Add any paths that contain custom static files (such as style sheets) here, # Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files, # relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css". # so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['sphinx-sources/_static'] html_static_path = ['sphinx_input/_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format. # using the given strftime format.

View file

@ -191,6 +191,10 @@ class BroNotices(Index):
def generate(self, docnames=None): def generate(self, docnames=None):
content = {} content = {}
if 'notices' not in self.domain.env.domaindata['bro']:
return content, False
for n in self.domain.env.domaindata['bro']['notices']: for n in self.domain.env.domaindata['bro']['notices']:
modname = n[0].split("::")[0] modname = n[0].split("::")[0]
entries = content.setdefault(modname, []) entries = content.setdefault(modname, [])

317
doc/ext/broxygen.py Normal file
View file

@ -0,0 +1,317 @@
"""
Broxygen domain for Sphinx.
Adds directives that allow Sphinx to invoke Bro in order to generate script
reference documentation on the fly. The directives are:
broxygen:package
- Shows links to all scripts contained within matching package(s).
broxygen:package_index
- An index with links to matching package document(s).
broxygen:script
- Reference for matching script(s) (i.e. everything declared by the script).
broxygen:script_summary
- Shows a link to matching script(s) with its summary-section comments.
broxygen:script_index
- An index with links to all matching scripts.
broxygen:proto_analyzer
- All protocol analyzers and their components (events/bifs, etc.)
broxygen:file_analyzer
- All file analyzers and their components (events/bifs, etc.)
"""
from sphinx.domains import Domain, ObjType
from sphinx.locale import l_
from docutils.parsers.rst.directives.misc import Include
App = None
def info(msg):
"""Use Sphinx builder to output a console message."""
global App
from sphinx.util.console import blue
App.builder.info(blue(msg))
def pattern_to_filename_component(pattern):
"""Replace certain characters in Broxygen config file target pattern.
Such that it can be used as part of a (sane) filename.
"""
return pattern.replace("/", ".").replace("*", "star")
def ensure_dir(path):
"""Should act like ``mkdir -p``."""
import os
import errno
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def generate_config(env, type, pattern):
"""Create a Broxygen config file for a particular target.
It can be used by Bro to generate reST docs for that target.
"""
import os
import tempfile
from sphinx.errors import SphinxError
work_dir = env.config.broxygen_cache
if not work_dir:
raise SphinxError("broxygen_cache not set in sphinx config file")
ensure_dir(work_dir)
prefix = "{0}-{1}-".format(type, pattern_to_filename_component(pattern))
(fd, cfg) = tempfile.mkstemp(suffix=".cfg", prefix=prefix, dir=work_dir)
generated_file = "{0}.rst".format(cfg)
config = "{0}\t{1}\t{2}".format(type, pattern, generated_file)
f = os.fdopen(fd, "w")
f.write(config)
f.close()
return (cfg, generated_file)
def generate_target(env, type, pattern):
"""Create a Broxygen target and build it.
    For a target which hasn't been referenced by any other script, this function
    creates an associated config file, then uses Bro with it to build the target
    and stores the target information in the build environment.

    If a script references a target that's already found in the build
    environment, the results of the previous build are re-used.
"""
app_data = env.domaindata["broxygen"]
if (type, pattern) in app_data["targets"]:
info("Broxygen has cached doc for target '{0} {1}'".format(
type, pattern))
        return app_data["targets"][(type, pattern)]
(cfg, gend_file) = generate_config(env, type, pattern)
target = BroxygenTarget(type, pattern, cfg, gend_file)
app_data["targets"][(type, pattern)] = target
build_target(env, target)
info("Broxygen built target '{0} {1}'".format(type, pattern))
return target
def build_target(env, target):
"""Invoke a Bro process to build a Broxygen target."""
import os
import subprocess
path_to_bro = env.config.bro_binary
    if not path_to_bro:
        from sphinx.errors import SphinxError
        raise SphinxError("'bro' not set in sphinx config file (path to bro)")
bro_cmd = "{0} -X {1} broxygen".format(path_to_bro, target.config_file)
cwd = os.getcwd()
os.chdir(os.path.dirname(target.config_file))
try:
subprocess.check_output(bro_cmd, stderr=subprocess.STDOUT, shell=True)
except subprocess.CalledProcessError as e:
from sphinx.errors import SphinxError
raise SphinxError(
"Command '{0}' returned non-zero exit status {1}: {2}".format(
e.cmd, e.returncode, e.output))
finally:
os.chdir(cwd)
class BroxygenTarget(object):
"""Some portion of reST documentation that Bro knows how to generate.
A target is identified by its type and pattern. E.g. type "script" and
pattern "broxygen/example.bro".
"""
def __init__(self, type, pattern, config_file, generated_file):
self.type = type
self.pattern = pattern
self.config_file = config_file
self.generated_file = generated_file
self.used_in_docs = set()
class BroxygenDirective(Include):
"""Base class for Broxygen directives.
It can use Bro to generate reST documentation on the fly and embed it in
the document at the location of the directive just like the ``.. include::``
directive. The only argument is a pattern to identify to Bro which
pieces of documentation it needs to create.
"""
required_arguments = 1
has_content = False
target_type = None
def run(self):
env = self.state.document.settings.env
info("Broxygen running .. {0}:: {1} in {2}".format(
self.name, self.arguments[0], env.docname))
target = generate_target(env, self.target_type, self.arguments[0])
target.used_in_docs.add(env.docname)
self.arguments = [target.generated_file]
return super(BroxygenDirective, self).run()
class PackageDirective(BroxygenDirective):
target_type = "package"
class PackageIndexDirective(BroxygenDirective):
target_type = "package_index"
class ScriptDirective(BroxygenDirective):
target_type = "script"
class ScriptSummaryDirective(BroxygenDirective):
target_type = "script_summary"
class ScriptIndexDirective(BroxygenDirective):
target_type = "script_index"
class ProtoAnalyzerDirective(BroxygenDirective):
target_type = "proto_analyzer"
class FileAnalyzerDirective(BroxygenDirective):
target_type = "file_analyzer"
class IdentifierDirective(BroxygenDirective):
target_type = "identifier"
class BroxygenDomain(Domain):
name = "broxygen"
label = "Broxygen"
object_types = {
"package": ObjType(l_("package")),
"package_index": ObjType(l_("package_index")),
"script": ObjType(l_("script")),
"script_summary": ObjType(l_("script_summary")),
"script_index": ObjType(l_("script_index")),
"proto_analyzer": ObjType(l_("proto_analyzer")),
"file_analyzer": ObjType(l_("file_analyzer")),
"identifier": ObjType(l_("identifier")),
}
directives = {
"package": PackageDirective,
"package_index": PackageIndexDirective,
"script": ScriptDirective,
"script_summary": ScriptSummaryDirective,
"script_index": ScriptIndexDirective,
"proto_analyzer": ProtoAnalyzerDirective,
"file_analyzer": FileAnalyzerDirective,
"identifier": IdentifierDirective,
}
roles = {}
initial_data = {
"targets": {}
}
def clear_doc(self, docname):
"""Update Broxygen targets referenced in docname.
If it's the last place the target was referenced, remove it from
the build environment and delete any generated config/reST files
associated with it from the cache.
"""
import os
stale_targets = []
for (type, pattern), target in self.data["targets"].items():
if docname in target.used_in_docs:
target.used_in_docs.remove(docname)
if not target.used_in_docs:
stale_targets.append(target)
for target in stale_targets:
del self.data["targets"][(target.type, target.pattern)]
os.remove(target.config_file)
os.remove(target.generated_file)
def get_objects(self):
"""No Broxygen-generated content is itself linkable/searchable."""
return []
def env_get_outdated_hook(app, env, added, changed, removed):
"""Check whether to re-read any documents referencing Broxygen targets.
To do that we have to ask Bro to rebuild each target and compare the
before and after modification times of the generated reST output file.
If Bro changed it, then the document containing the Broxygen directive
needs to be re-read.
"""
import os
reread = set()
for target in app.env.domaindata["broxygen"]["targets"].values():
        before_mtime = os.stat(target.generated_file).st_mtime
        build_target(env, target)
        after_mtime = os.stat(target.generated_file).st_mtime

        if after_mtime > before_mtime:
info("Broxygen target '{0} {1}' outdated".format(
target.type, target.pattern))
for docname in target.used_in_docs:
if docname not in removed:
info(" in document: {0}".format(docname))
reread.add(docname)
return list(reread)
def setup(app):
global App
App = app
app.add_domain(BroxygenDomain)
app.add_config_value("bro_binary", None, "env")
app.add_config_value("broxygen_cache", None, "env")
app.connect("env-get-outdated", env_get_outdated_hook)

View file

@ -1,180 +0,0 @@
==========================
Frequently Asked Questions
==========================
.. raw:: html
<div class="faq">
.. contents::
Installation and Configuration
==============================
How can I tune my operating system for best capture performance?
----------------------------------------------------------------
Here are some pointers to more information:
* Fabian Schneider's research on `high performance packet capture
<http://www.net.t-labs.tu-berlin.de/research/hppc>`_
* `NSMWiki <http://nsmwiki.org/Main_Page>`_ has a page on
*Collecting Data*.
* An `IMC 2010 paper
<http://conferences.sigcomm.org/imc/2010/papers/p206.pdf>`_ by
Lothar Braun et al. evaluates packet capture performance on
commodity hardware.
Are there any gotchas regarding interface configuration for live capture? Or why might I be seeing abnormally large packets much greater than interface MTU?
-------------------------------------------------------------------------------------------------------------------------------------------------------------
Some NICs offload the reassembly of traffic into "superpackets" so that
fewer packets are then passed up the stack (e.g. "TCP segmentation
offload", or "generic segmentation offload"). The result is that the
capturing application will observe packets much larger than the MTU size
of the interface they were captured from and may also interfere with the
maximum packet capture length, ``snaplen``, so it's a good idea to disable
an interface's offloading features.
You can use the ``ethtool`` program on Linux to view and disable
offloading features of an interface. See this page for more explicit
directions:
http://securityonion.blogspot.com/2011/10/when-is-full-packet-capture-not-full.html
What does an error message like ``internal error: NB-DNS error`` mean?
----------------------------------------------------------------------
That often means that DNS is not set up correctly on the system
running Bro. Try verifying from the command line that DNS lookups
work, e.g., ``host www.google.com``.
I am using OpenBSD and having problems installing Bro. What can I do?
----------------------------------------------------------------------
One potential issue is that the top-level Makefile may not work with
OpenBSD's default make program, in which case you can either install
the ``gmake`` package and use it instead, or first change into the
``build/`` directory before running ``make`` or ``make install``,
so that the CMake-generated Makefiles are used directly.
Generally, please note that we do not regularly test OpenBSD builds.
We appreciate any patches that improve Bro's support for this
platform.
How do BroControl options affect Bro script variables?
------------------------------------------------------
Some (but not all) BroControl options override a corresponding Bro script variable.
For example, setting the BroControl option "LogRotationInterval" will override
the value of the Bro script variable "Log::default_rotation_interval".
See the :doc:`BroControl Documentation <components/broctl/README>` to find out
which BroControl options override Bro script variables, and for more discussion
on site-specific customization.
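For comparison, the script-level way of changing that particular setting directly
(a minimal sketch; the interval shown is just an example value) is a ``redef`` in a
Bro script such as ``local.bro``:

.. code:: bro

    redef Log::default_rotation_interval = 1 hr;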
Usage
=====
How can I identify backscatter?
-------------------------------
Identifying backscatter via connections labeled as ``OTH`` is not a reliable
approach. Backscatter is, however, visible by interpreting the contents of
the ``history`` field in the ``conn.log`` file. The basic idea
is to watch for connections that never had an initial ``SYN`` but started
instead with a ``SYN-ACK`` or ``RST`` (though the latter is generally just
discarded). Here are some history fields which provide backscatter examples:
``hAFf``, ``r``. Refer to the conn protocol analysis scripts to interpret the
individual character meanings in the history field.
Is there help for understanding Bro's resource consumption?
-----------------------------------------------------------
There are two scripts that collect statistics on resource usage:
``misc/stats.bro`` and ``misc/profiling.bro``. The former is quite
lightweight, while the latter should only be used for debugging.
How can I capture packets as an unprivileged user?
--------------------------------------------------
Normally, unprivileged users cannot capture packets from a network interface,
which means they would not be able to use Bro to read/analyze live traffic.
However, there are operating system specific ways to enable packet capture
permission for non-root users, which is worth doing in the context of using
Bro to monitor live traffic.
With Linux Capabilities
^^^^^^^^^^^^^^^^^^^^^^^
Fully implemented since Linux kernel 2.6.24, capabilities are a way of
parceling superuser privileges into distinct units. Attach capabilities
required to capture packets to the ``bro`` executable file like this:
.. console::
sudo setcap cap_net_raw,cap_net_admin=eip /path/to/bro
Now any unprivileged user should have the capability to capture packets
using Bro provided that they have the traditional file permissions to
read/execute the ``bro`` binary.
With BPF Devices
^^^^^^^^^^^^^^^^
Systems using Berkeley Packet Filter (BPF) (e.g. FreeBSD & Mac OS X)
can allow users with read access to a BPF device to capture packets from
it using libpcap.
* Example of manually changing BPF device permissions to allow users in
the ``admin`` group to capture packets:
.. console::
sudo chgrp admin /dev/bpf*
sudo chmod g+r /dev/bpf*
* Example of configuring devfs to set permissions of BPF devices, adding
entries to ``/etc/devfs.conf`` to grant ``admin`` group permission to
capture packets:
.. console::
sudo sh -c 'echo "own bpf root:admin" >> /etc/devfs.conf'
sudo sh -c 'echo "perm bpf 0640" >> /etc/devfs.conf'
sudo service devfs restart
.. note:: As of Mac OS X 10.6, the BPF device is on devfs, but the version
of devfs in use isn't capable of setting the device permissions. The
permissions can be changed manually, but they will not survive a reboot.
Why isn't Bro producing the logs I expect? (A Note About Checksums)
-------------------------------------------------------------------
Normally, Bro's event engine will discard packets which don't have valid
checksums. This can be a problem if one wants to analyze locally
generated/captured traffic on a system that offloads checksumming to the
network adapter. In that case, all transmitted/captured packets will have
bad checksums because they haven't yet been calculated by the NIC, thus
such packets will not undergo analysis defined in Bro policy scripts as they
normally would. Bad checksums in traces may also be a result of some packet
alteration tools.
Bro has two options to work around such situations and ignore bad checksums:
1) The ``-C`` command line option to ``bro``.
2) An option called ``ignore_checksums`` that can be redefined at the
policy script layer (e.g. in your ``$PREFIX/share/bro/site/local.bro``):
.. code:: bro
redef ignore_checksums = T;
The other alternative is to disable checksum offloading for your
network adapter, but this is not always possible or desirable.
.. raw:: html
</div>

View file

@ -1,3 +1,6 @@
.. _file-analysis-framework:
============= =============
File Analysis File Analysis
============= =============
@ -31,40 +34,13 @@ some information about the file such as which network
:bro:see:`connection` and protocol are transporting the file, how many :bro:see:`connection` and protocol are transporting the file, how many
bytes have been transferred so far, and its MIME type. bytes have been transferred so far, and its MIME type.
.. code:: bro Here's a simple example:
event connection_state_remove(c: connection) .. btest-include:: ${DOC_ROOT}/frameworks/file_analysis_01.bro
{
print "connection_state_remove";
print c$uid;
print c$id;
for ( s in c$service )
print s;
}
event file_state_remove(f: fa_file) .. btest:: file-analysis-01
{
print "file_state_remove";
print f$id;
for ( cid in f$conns )
{
print f$conns[cid]$uid;
print cid;
}
print f$source;
}
might give output like:: @TEST-EXEC: btest-rst-cmd bro -r ${TRACES}/http/get.trace ${DOC_ROOT}/frameworks/file_analysis_01.bro
file_state_remove
Cx92a0ym5R8
REs2LQfVW2j
[orig_h=10.0.0.7, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp]
HTTP
connection_state_remove
REs2LQfVW2j
[orig_h=10.0.0.7, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp]
HTTP
This doesn't perform any interesting analysis yet, but does highlight This doesn't perform any interesting analysis yet, but does highlight
the similarity between analysis of connections and files. Connections the similarity between analysis of connections and files. Connections
@ -82,57 +58,47 @@ attached, they start receiving the contents of the file as Bro extracts
it from an ongoing network connection. What they do with the file it from an ongoing network connection. What they do with the file
contents is up to the particular file analyzer implementation, but contents is up to the particular file analyzer implementation, but
they'll typically either report further information about the file via they'll typically either report further information about the file via
events (e.g. :bro:see:`FileAnalysis::ANALYZER_MD5` will report the events (e.g. :bro:see:`Files::ANALYZER_MD5` will report the
file's MD5 checksum via :bro:see:`file_hash` once calculated) or they'll file's MD5 checksum via :bro:see:`file_hash` once calculated) or they'll
have some side effect (e.g. :bro:see:`FileAnalysis::ANALYZER_EXTRACT` have some side effect (e.g. :bro:see:`Files::ANALYZER_EXTRACT`
will write the contents of the file out to the local file system). will write the contents of the file out to the local file system).
In the future there may be file analyzers that automatically attach to In the future there may be file analyzers that automatically attach to
files based on heuristics, similar to the Dynamic Protocol Detection files based on heuristics, similar to the Dynamic Protocol Detection
(DPD) framework for connections, but many will always require an (DPD) framework for connections, but many will always require an
explicit attachment decision: explicit attachment decision.
.. code:: bro Here's a simple example of how to use the MD5 file analyzer to
calculate the MD5 of plain text files:
event file_new(f: fa_file) .. btest-include:: ${DOC_ROOT}/frameworks/file_analysis_02.bro
{
print "new file", f$id;
if ( f?$mime_type && f$mime_type == "text/plain" )
FileAnalysis::add_analyzer(f, [$tag=FileAnalysis::ANALYZER_MD5]);
}
event file_hash(f: fa_file, kind: string, hash: string) .. btest:: file-analysis-02
{
print "file_hash", f$id, kind, hash;
}
this script calculates MD5s for all plain text files and might give @TEST-EXEC: btest-rst-cmd bro -r ${TRACES}/http/get.trace ${DOC_ROOT}/frameworks/file_analysis_02.bro
output::
new file, Cx92a0ym5R8
file_hash, Cx92a0ym5R8, md5, 397168fd09991a0e712254df7bc639ac
Some file analyzers might have tunable parameters that need to be Some file analyzers might have tunable parameters that need to be
specified in the call to :bro:see:`FileAnalysis::add_analyzer`: specified in the call to :bro:see:`Files::add_analyzer`:
.. code:: bro .. code:: bro
event file_new(f: fa_file) event file_new(f: fa_file)
{ {
FileAnalysis::add_analyzer(f, [$tag=FileAnalysis::ANALYZER_EXTRACT, Files::add_analyzer(f, Files::ANALYZER_EXTRACT,
$extract_filename="./myfile"]); [$extract_filename="myfile"]);
} }
In this case, the file extraction analyzer doesn't generate any further In this case, the file extraction analyzer doesn't generate any further
events, but does have the side effect of writing out the file contents events, but does have the effect of writing out the file contents to the
to the local file system at the specified location of ``./myfile``. Of local file system at the location resulting from the concatenation of
course, for a network with more than a single file being transferred, the path specified by :bro:see:`FileExtract::prefix` and the string,
it's probably preferable to specify a different extraction path for each ``myfile``. Of course, for a network with more than a single file being
file, unlike this example. transferred, it's probably preferable to specify a different extraction
path for each file, unlike this example.
Regardless of which file analyzers end up acting on a file, general Regardless of which file analyzers end up acting on a file, general
information about the file (e.g. size, time of last data transferred, information about the file (e.g. size, time of last data transferred,
MIME type, etc.) are logged in ``file_analysis.log``. MIME type, etc.) are logged in ``files.log``.
Input Framework Integration Input Framework Integration
=========================== ===========================
@ -143,41 +109,19 @@ in the same way it analyzes files that it sees coming over traffic from
a network interface it's monitoring. It only requires a call to a network interface it's monitoring. It only requires a call to
:bro:see:`Input::add_analysis`: :bro:see:`Input::add_analysis`:
.. code:: bro .. btest-include:: ${DOC_ROOT}/frameworks/file_analysis_03.bro
redef exit_only_after_terminate = T;
event file_new(f: fa_file)
{
print "new file", f$id;
FileAnalysis::add_analyzer(f, [$tag=FileAnalysis::ANALYZER_MD5]);
}
event file_state_remove(f: fa_file)
{
Input::remove(f$source);
terminate();
}
event file_hash(f: fa_file, kind: string, hash: string)
{
print "file_hash", f$id, kind, hash;
}
event bro_init()
{
local source: string = "./myfile";
Input::add_analysis([$source=source, $name=source]);
}
Note that the "source" field of :bro:see:`fa_file` corresponds to the Note that the "source" field of :bro:see:`fa_file` corresponds to the
"name" field of :bro:see:`Input::AnalysisDescription` since that is what "name" field of :bro:see:`Input::AnalysisDescription` since that is what
the input framework uses to uniquely identify an input stream. the input framework uses to uniquely identify an input stream.
The output of the above script may be:: The output of the above script may be (assuming a file called "myfile"
exists):
new file, G1fS2xthS4l .. btest:: file-analysis-03
file_hash, G1fS2xthS4l, md5, 54098b367d2e87b078671fad4afb9dbb
@TEST-EXEC: echo "Hello world" > myfile
@TEST-EXEC: btest-rst-cmd bro ${DOC_ROOT}/frameworks/file_analysis_03.bro
Nothing that special, but it at least verifies the MD5 file analyzer Nothing that special, but it at least verifies the MD5 file analyzer
saw all the bytes of the input file and calculated the checksum saw all the bytes of the input file and calculated the checksum

View file

@ -0,0 +1,20 @@
event connection_state_remove(c: connection)
{
print "connection_state_remove";
print c$uid;
print c$id;
for ( s in c$service )
print s;
}
event file_state_remove(f: fa_file)
{
print "file_state_remove";
print f$id;
for ( cid in f$conns )
{
print f$conns[cid]$uid;
print cid;
}
print f$source;
}

View file

@ -0,0 +1,11 @@
event file_new(f: fa_file)
{
print "new file", f$id;
if ( f?$mime_type && f$mime_type == "text/plain" )
Files::add_analyzer(f, Files::ANALYZER_MD5);
}
event file_hash(f: fa_file, kind: string, hash: string)
{
print "file_hash", f$id, kind, hash;
}

View file

@ -0,0 +1,25 @@
redef exit_only_after_terminate = T;
event file_new(f: fa_file)
{
print "new file", f$id;
Files::add_analyzer(f, Files::ANALYZER_MD5);
}
event file_state_remove(f: fa_file)
{
print "file_state_remove";
Input::remove(f$source);
terminate();
}
event file_hash(f: fa_file, kind: string, hash: string)
{
print "file_hash", f$id, kind, hash;
}
event bro_init()
{
local source: string = "./myfile";
Input::add_analysis([$source=source, $name=source]);
}

View file

@ -1,4 +1,6 @@
.. _geolocation:
=========== ===========
GeoLocation GeoLocation
=========== ===========
@ -9,10 +11,41 @@ GeoLocation
to find the geographic location for an IP address. Bro has support to find the geographic location for an IP address. Bro has support
for the `GeoIP library <http://www.maxmind.com/app/c>`__ at the for the `GeoIP library <http://www.maxmind.com/app/c>`__ at the
policy script level beginning with release 1.3 to account for this policy script level beginning with release 1.3 to account for this
need. need. To use this functionality, you need to first install the libGeoIP
software, and then install the GeoLite city database before building
Bro.
.. contents:: .. contents::
Install libGeoIP
----------------
* FreeBSD:
.. console::
sudo pkg_add -r GeoIP
* RPM/RedHat-based Linux:
.. console::
sudo yum install GeoIP-devel
* DEB/Debian-based Linux:
.. console::
sudo apt-get install libgeoip-dev
* Mac OS X:
Vanilla OS X installations don't ship with libGeoIP, but if
installed from your preferred package management system (e.g.
MacPorts, Fink, or Homebrew), they should be automatically detected
and Bro will compile against them.
GeoIPLite Database Installation GeoIPLite Database Installation
------------------------------------ ------------------------------------
@ -20,39 +53,23 @@ A country database for GeoIPLite is included when you do the C API
install, but for Bro, we are using the city database which includes install, but for Bro, we are using the city database which includes
cities and regions in addition to countries. cities and regions in addition to countries.
`Download <http://www.maxmind.com/app/geolitecity>`__ the geolitecity `Download <http://www.maxmind.com/app/geolitecity>`__ the GeoLite city
binary database and follow the directions to install it. binary database.
FreeBSD Quick Install
---------------------
.. console:: .. console::
pkg_add -r GeoIP
wget http://geolite.maxmind.com/download/geoip/database/GeoLiteCity.dat.gz wget http://geolite.maxmind.com/download/geoip/database/GeoLiteCity.dat.gz
gunzip GeoLiteCity.dat.gz gunzip GeoLiteCity.dat.gz
mv GeoLiteCity.dat /usr/local/share/GeoIP/GeoIPCity.dat
# Set your environment correctly before running Bro's configure script Next, the file needs to be put in the database directory. This directory
export CFLAGS=-I/usr/local/include should already exist and will vary depending on which platform and package
export LDFLAGS=-L/usr/local/lib you are using. For FreeBSD, use ``/usr/local/share/GeoIP``. For Linux,
use ``/usr/share/GeoIP`` or ``/var/lib/GeoIP`` (choose whichever one
already exists).
CentOS Quick Install
--------------------
.. console:: .. console::
yum install GeoIP-devel mv GeoLiteCity.dat <path_to_database_dir>/GeoIPCity.dat
wget http://geolite.maxmind.com/download/geoip/database/GeoLiteCity.dat.gz
gunzip GeoLiteCity.dat.gz
mkdir -p /var/lib/GeoIP/
mv GeoLiteCity.dat /var/lib/GeoIP/GeoIPCity.dat
# Set your environment correctly before running Bro's configure script
export CFLAGS=-I/usr/local/include
export LDFLAGS=-L/usr/local/lib
Usage Usage
@ -65,8 +82,8 @@ functionality:
function lookup_location(a:addr): geo_location function lookup_location(a:addr): geo_location
There is also the ``geo_location`` data structure that is returned There is also the :bro:see:`geo_location` data structure that is returned
from the ``lookup_location`` function: from the :bro:see:`lookup_location` function:
.. code:: bro .. code:: bro

17
doc/frameworks/index.rst Normal file
View file

@ -0,0 +1,17 @@
==========
Frameworks
==========
.. toctree::
:maxdepth: 1
file-analysis
geoip
input
intel
logging
notice
signatures
sumstats

View file

@ -1,6 +1,9 @@
==============================================
Loading Data into Bro with the Input Framework .. _framework-input:
==============================================
===============
Input Framework
===============
.. rst-class:: opening .. rst-class:: opening
@ -259,8 +262,13 @@ to optimize the speed of the input framework. It can generate arbitrary
amounts of semi-random data in all Bro data types supported by the input amounts of semi-random data in all Bro data types supported by the input
framework. framework.
In the future, the input framework will get support for new data sources Currently, Bro supports the following readers in addition to the
like, for example, different databases. aforementioned ones:
.. toctree::
:maxdepth: 1
logging-input-sqlite
Add_table options Add_table options
----------------- -----------------

View file

@ -1,5 +1,7 @@
Intel Framework
=============== ======================
Intelligence Framework
======================
Intro Intro
----- -----
@ -57,15 +59,15 @@ intelligence framework has distribution mechanisms which will push
data out to all of the nodes that need it. data out to all of the nodes that need it.
Here is an example of the intelligence data format. Note that all Here is an example of the intelligence data format. Note that all
whitespace separators are literal tabs and fields containing only a whitespace field separators are literal tabs and fields containing only a
hyphen are considered to be null values. :: hyphen are considered to be null values. ::
#fields indicator indicator_type meta.source meta.desc meta.url #fields indicator indicator_type meta.source meta.desc meta.url
1.2.3.4 Intel::ADDR source1 Sending phishing email http://source1.com/badhosts/1.2.3.4 1.2.3.4 Intel::ADDR source1 Sending phishing email http://source1.com/badhosts/1.2.3.4
a.b.com Intel::DOMAIN source2 Name used for data exfiltration - a.b.com Intel::DOMAIN source2 Name used for data exfiltration -
For more examples of built in `indicator_type` values, please refer to the For a list of all built-in `indicator_type` values, please refer to the
autogenerated documentation for the intelligence framework. documentation of :bro:see:`Intel::Type`.
To load the data once files are created, use the following example To load the data once files are created, use the following example
code to define files to load with your own file names of course:: code to define files to load with your own file names of course::
@ -85,7 +87,7 @@ When some bit of data is extracted (such as an email address in the
"From" header in a message over SMTP), the Intelligence Framework "From" header in a message over SMTP), the Intelligence Framework
needs to be informed that this data was discovered and it's presence needs to be informed that this data was discovered and it's presence
should be checked within the intelligence data set. This is should be checked within the intelligence data set. This is
accomplished through the Intel::seen function. accomplished through the :bro:see:`Intel::seen` function.
Typically users won't need to work with this function due to built in Typically users won't need to work with this function due to built in
hook scripts that Bro ships with that will "see" data and send it into hook scripts that Bro ships with that will "see" data and send it into
@ -93,8 +95,8 @@ the intelligence framework. A user may only need to load the entire
package of hook scripts as a module or pick and choose specific package of hook scripts as a module or pick and choose specific
scripts to load. Keep in mind that as more data is sent into the scripts to load. Keep in mind that as more data is sent into the
intelligence framework, the CPU load consumed by Bro will increase intelligence framework, the CPU load consumed by Bro will increase
depending on how many times the Intel::seen function is being called depending on how many times the :bro:see:`Intel::seen` function is
which is heavily traffic dependent. being called which is heavily traffic dependent.
The full package of hook scripts that Bro ships with for sending this The full package of hook scripts that Bro ships with for sending this
"seen" data into the intelligence framework can be loading by adding "seen" data into the intelligence framework can be loading by adding
@ -108,12 +110,12 @@ Intelligence Matches
Against all hopes, most networks will eventually have a hit on Against all hopes, most networks will eventually have a hit on
intelligence data which could indicate a possible compromise or other intelligence data which could indicate a possible compromise or other
unwanted activity. The Intelligence Framework provides an event that unwanted activity. The Intelligence Framework provides an event that
is generated whenever a match is discovered named Intel::match (TODO: is generated whenever a match is discovered named :bro:see:`Intel::match`.
make a link to inline docs). Due to design restrictions placed upon Due to design restrictions placed upon
the intelligence framework, there is no assurance as to where this the intelligence framework, there is no assurance as to where this
event will be generated. It could be generated on the worker where event will be generated. It could be generated on the worker where
the data was seen or on the manager. When the Intel::match event is the data was seen or on the manager. When the ``Intel::match`` event is
handled, only the data given as event arguments to the event can be handled, only the data given as event arguments to the event can be
assured since the host where the data was seen may not be where assured since the host where the data was seen may not be where
Intel::match is handled. ``Intel::match`` is handled.

View file

@ -104,7 +104,7 @@ code like this to your ``local.bro``:
} }
Bro's DataSeries writer comes with a few tuning options, see Bro's DataSeries writer comes with a few tuning options, see
:doc:`scripts/base/frameworks/logging/writers/dataseries`. :doc:`/scripts/base/frameworks/logging/writers/dataseries.bro`.
Working with DataSeries Working with DataSeries
======================= =======================

View file

@ -25,18 +25,18 @@ respond successfully to the insertion request.
Installing ElasticSearch Installing ElasticSearch
------------------------ ------------------------
Download the latest version from: <http://www.elasticsearch.org/download/>. Download the latest version from: http://www.elasticsearch.org/download/.
Once extracted, start ElasticSearch with:: Once extracted, start ElasticSearch with::
# ./bin/elasticsearch # ./bin/elasticsearch
For more detailed information, refer to the ElasticSearch installation For more detailed information, refer to the ElasticSearch installation
documentation: http://www.elasticsearch.org/guide/reference/setup/installation.html documentation: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/setup.html
Compiling Bro with ElasticSearch Support Compiling Bro with ElasticSearch Support
---------------------------------------- ----------------------------------------
First, ensure that you have libcurl installed the run configure.:: First, ensure that you have libcurl installed then run configure::
# ./configure # ./configure
[...] [...]
@ -51,9 +51,9 @@ First, ensure that you have libcurl installed the run configure.::
Activating ElasticSearch Activating ElasticSearch
------------------------ ------------------------
The easiest way to enable ElasticSearch output is to load the tuning/logs-to- The easiest way to enable ElasticSearch output is to load the
elasticsearch.bro script. If you are using BroControl, the following line in tuning/logs-to-elasticsearch.bro script. If you are using BroControl,
local.bro will enable it. the following line in local.bro will enable it:
.. console:: .. console::
@ -76,7 +76,7 @@ A common problem encountered with ElasticSearch is too many files being held
open. The ElasticSearch website has some suggestions on how to increase the open. The ElasticSearch website has some suggestions on how to increase the
open file limit. open file limit.
- http://www.elasticsearch.org/tutorials/2011/04/06/too-many-open-files.html - http://www.elasticsearch.org/tutorials/too-many-open-files/
TODO TODO
---- ----

View file

@ -0,0 +1,166 @@
============================================
Logging To and Reading From SQLite Databases
============================================
.. rst-class:: opening
Starting with version 2.2, Bro features a SQLite logging writer
as well as a SQLite input reader. SQLite is a simple, file-based,
widely used SQL database system. Using SQLite allows Bro to write
and access data in a format that is easy to exchange with
other applications. Due to the transactional nature of SQLite,
databases can be used by several applications simultaneously. Hence,
they can, for example, be used to make data that changes regularly available
to Bro on a continuing basis.
.. contents::
Warning
=======
In contrast to the ASCII reader and writer, the SQLite plugins have not yet
seen extensive use in production environments. While we are not aware
of any issues with them, we urge caution when using them
in production environments. There could be lingering issues which only occur
when the plugins are used with high amounts of data or in high-load environments.
Logging Data into SQLite Databases
==================================
Logging support for SQLite is available in all Bro installations starting with
version 2.2. There is no need to load any additional scripts or to change
any compile-time configuration.
Sending data from existing logging streams to SQLite is rather straightforward. You
have to define a filter which specifies SQLite as the writer.
The following example code adds SQLite as a filter for the connection log:
.. btest-include:: ${DOC_ROOT}/frameworks/sqlite-conn-filter.bro
.. btest:: sqlite-conn-filter-check
# Make sure this parses correctly at least.
@TEST-EXEC: bro ${DOC_ROOT}/frameworks/sqlite-conn-filter.bro
Bro will create the database file ``/var/db/conn.sqlite``, if it does not already exist.
It will also create a table with the name ``conn`` (if it does not exist) and start
appending connection information to the table.
At the moment, SQLite databases are not rotated the same way ASCII log-files are. You
have to take care to create them in an adequate location.
If you examine the resulting SQLite database, the schema will contain the same fields
that are present in the ASCII log files::
# sqlite3 /var/db/conn.sqlite
SQLite version 3.8.0.2 2013-09-03 17:11:13
Enter ".help" for instructions
Enter SQL statements terminated with a ";"
sqlite> .schema
CREATE TABLE conn (
'ts' double precision,
'uid' text,
'id.orig_h' text,
'id.orig_p' integer,
...
Note that the ASCII ``conn.log`` will still be created. To disable the ASCII writer for a
log stream, you can remove the default filter:
.. code:: bro
Log::remove_filter(Conn::LOG, "default");
To create a custom SQLite log file, you have to create a new log stream that contains
just the information you want to commit to the database. Please refer to the
:ref:`framework-logging` documentation on how to create custom log streams.
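As a rough sketch of what that can look like (the module name, record fields, and
database path below are made up for illustration), a custom stream combined with a
SQLite filter might be defined as:

.. code:: bro

    module Example;

    export {
        redef enum Log::ID += { LOG };

        type Info: record {
            ts:   time &log;
            host: addr &log;
        };
    }

    event bro_init()
        {
        Log::create_stream(Example::LOG, [$columns=Info]);
        Log::add_filter(Example::LOG, [$name="sqlite",
                                       $path="/var/db/example",
                                       $writer=Log::WRITER_SQLITE]);
        }

Records are then committed to the database with ``Log::write(Example::LOG, ...)``
as described in the logging framework documentation.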
Reading Data from SQLite Databases
==================================
Like logging support, support for reading data from SQLite databases is built into Bro starting
with version 2.2.
Just as with the text-based input readers (please refer to the :ref:`framework-input`
documentation for them and for basic information on how to use the input-framework), the SQLite reader
can be used to read data - in this case the result of SQL queries - into tables or into events.
Reading Data into Tables
------------------------
To read data from a SQLite database, we first have to tell Bro how
the resulting data will be structured. For this example, we assume a SQLite database
that contains host IP addresses and the user accounts that are allowed to log into a specific
machine.
The SQLite commands to create the schema are as follows::
create table machines_to_users (
host text unique not null,
users text not null);
insert into machines_to_users values ('192.168.17.1', 'bernhard,matthias,seth');
insert into machines_to_users values ('192.168.17.2', 'bernhard');
insert into machines_to_users values ('192.168.17.3', 'seth,matthias');
After creating a file called ``hosts.sqlite`` with this content, we can read the resulting table
into Bro:
.. btest-include:: ${DOC_ROOT}/frameworks/sqlite-read-table.bro
.. btest:: sqlite-read-table-check
# Make sure this parses correctly at least.
@TEST-EXEC: bro ${DOC_ROOT}/frameworks/sqlite-read-table.bro
Afterwards, that table can be used to check logins into hosts against the available
user list.
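For example (a sketch that assumes the ``hostslist`` table from the script above and
a username obtained elsewhere, e.g. from protocol analysis), such a check could be as
simple as:

.. code:: bro

    function is_login_allowed(host: addr, user: string): bool
        {
        return host in hostslist && user in hostslist[host]$users;
        }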
Turning Data into Events
------------------------
The second mode is to use the SQLite reader to output the input data as events. Typically there
are two reasons to do this. First, the structure of the input data may be too complicated
for a direct table import. In this case, the data can be read into an event which can then
create the necessary data structures in Bro in scriptland.
The second reason is that the dataset is too big to hold in memory. In this case, the checks
can be performed on demand, when Bro encounters a situation where it needs additional information.
An example would be a huge internal database of malware hashes. Live database queries
could be used to check downloads against the database as they occur.
The SQLite commands to create the schema are as follows::
create table malware_hashes (
hash text unique not null,
description text not null);
insert into malware_hashes values ('86f7e437faa5a7fce15d1ddcb9eaeaea377667b8', 'malware a');
insert into malware_hashes values ('e9d71f5ee7c92d6dc9e92ffdad17b8bd49418f98', 'malware b');
insert into malware_hashes values ('84a516841ba77a5b4648de2cd0dfcb30ea46dbb4', 'malware c');
insert into malware_hashes values ('3c363836cf4e16666669a25da280a1865c2d2874', 'malware d');
insert into malware_hashes values ('58e6b3a414a1e090dfc6029add0f3555ccba127f', 'malware e');
insert into malware_hashes values ('4a0a19218e082a343a1b17e5333409af9d98f0f5', 'malware f');
insert into malware_hashes values ('54fd1711209fb1c0781092374132c66e79e2241b', 'malware g');
insert into malware_hashes values ('27d5482eebd075de44389774fce28c69f45c8a75', 'malware h');
insert into malware_hashes values ('73f45106968ff8dc51fba105fa91306af1ff6666', 'ftp-trace');
The following code uses the file analysis framework to get the SHA1 hashes of files that are
transmitted over the network. For each hash, a SQL query is run against SQLite. If the query
returns a result, we have a hit against our malware database and output the matching hash.
.. btest-include:: ${DOC_ROOT}/frameworks/sqlite-read-events.bro
.. btest:: sqlite-read-events-check
# Make sure this parses correctly at least.
@TEST-EXEC: bro ${DOC_ROOT}/frameworks/sqlite-read-events.bro
If you run this script against the trace in ``testing/btest/Traces/ftp/ipv4.trace``, you
will get one hit.

View file

@ -1,6 +1,9 @@
==========================
Customizing Bro's Logging .. _framework-logging:
==========================
=================
Logging Framework
=================
.. rst-class:: opening .. rst-class:: opening
@ -45,7 +48,7 @@ Basics
The data fields that a stream records are defined by a record type The data fields that a stream records are defined by a record type
specified when it is created. Let's look at the script generating Bro's specified when it is created. Let's look at the script generating Bro's
connection summaries as an example, connection summaries as an example,
:doc:`scripts/base/protocols/conn/main`. It defines a record :doc:`/scripts/base/protocols/conn/main.bro`. It defines a record
:bro:type:`Conn::Info` that lists all the fields that go into :bro:type:`Conn::Info` that lists all the fields that go into
``conn.log``, each marked with a ``&log`` attribute indicating that it ``conn.log``, each marked with a ``&log`` attribute indicating that it
is part of the information written out. To write a log record, the is part of the information written out. To write a log record, the
@ -89,7 +92,8 @@ Note the fields that are set for the filter:
are generated by taking the stream's ID and munging it slightly. are generated by taking the stream's ID and munging it slightly.
:bro:enum:`Conn::LOG` is converted into ``conn``, :bro:enum:`Conn::LOG` is converted into ``conn``,
:bro:enum:`PacketFilter::LOG` is converted into :bro:enum:`PacketFilter::LOG` is converted into
``packet_filter``. ``packet_filter``, and :bro:enum:`Known::CERTS_LOG` is
converted into ``known_certs``.
``include`` ``include``
A set limiting the fields to the ones given. The names A set limiting the fields to the ones given. The names
@ -305,7 +309,7 @@ ASCII Writer Configuration
-------------------------- --------------------------
The ASCII writer has a number of options for customizing the format of The ASCII writer has a number of options for customizing the format of
its output, see :doc:`scripts/base/frameworks/logging/writers/ascii`. its output, see :doc:`/scripts/base/frameworks/logging/writers/ascii.bro`.
Adding Streams Adding Streams
============== ==============
@ -365,7 +369,7 @@ save the logged ``Foo::Info`` record into the connection record:
} }
See the existing scripts for how to work with such a new connection See the existing scripts for how to work with such a new connection
field. A simple example is :doc:`scripts/base/protocols/syslog/main`. field. A simple example is :doc:`/scripts/base/protocols/syslog/main.bro`.
When you are developing scripts that add data to the :bro:type:`connection` When you are developing scripts that add data to the :bro:type:`connection`
record, care must be given to when and how long data is stored. record, care must be given to when and how long data is stored.
@ -383,3 +387,4 @@ Bro supports the following output formats other than ASCII:
logging-dataseries logging-dataseries
logging-elasticsearch logging-elasticsearch
logging-input-sqlite

View file

@ -1,4 +1,6 @@
.. _notice-framework:
Notice Framework Notice Framework
================ ================
@ -98,9 +100,9 @@ type :bro:see:`SSH::Password_Guessing` if the server is 10.0.0.1:
.. note:: .. note::
Keep in mind that the semantics of the SSH::Password_Guessing notice are Keep in mind that the semantics of the :bro:see:`SSH::Password_Guessing`
such that it is only raised when Bro heuristically detects a failed notice are such that it is only raised when Bro heuristically detects
login. a failed login.
Hooks can also have priorities applied to order their execution like events Hooks can also have priorities applied to order their execution like events
with a default priority of 0. Greater values are executed first. Setting with a default priority of 0. Greater values are executed first. Setting
@ -283,7 +285,7 @@ information to suppress duplicates for a configurable period of time.
The ``$identifier`` field is typically comprised of several pieces of The ``$identifier`` field is typically comprised of several pieces of
data related to the notice that when combined represent a unique data related to the notice that when combined represent a unique
instance of that notice. Here is an example of the script instance of that notice. Here is an example of the script
:doc:`scripts/policy/protocols/ssl/validate-certs` raising a notice :doc:`/scripts/policy/protocols/ssl/validate-certs.bro` raising a notice
for session negotiations where the certificate or certificate chain did for session negotiations where the certificate or certificate chain did
not validate successfully against the available certificate authority not validate successfully against the available certificate authority
certificates. certificates.
@ -339,7 +341,7 @@ included below.
hook Notice::policy(n: Notice::Info) hook Notice::policy(n: Notice::Info)
{ {
if ( n?$conn && n$conn?$http && n$conn$http?$host ) if ( n?$conn && n$conn?$http && n$conn$http?$host )
n$email_body_sections[|email_body_sections|] = fmt("HTTP host header: %s", n$conn$http$host); n$email_body_sections[|n$email_body_sections|] = fmt("HTTP host header: %s", n$conn$http$host);
} }
@ -348,7 +350,7 @@ Cluster Considerations
As a user/developer of Bro, the main cluster concern with the notice framework As a user/developer of Bro, the main cluster concern with the notice framework
is understanding what runs where. When a notice is generated on a worker, the is understanding what runs where. When a notice is generated on a worker, the
worker checks to see if the notice shoudl be suppressed based on information worker checks to see if the notice should be suppressed based on information
locally maintained in the worker process. If it's not being locally maintained in the worker process. If it's not being
suppressed, the worker forwards the notice directly to the manager and does no more suppressed, the worker forwards the notice directly to the manager and does no more
local processing. The manager then runs the :bro:see:`Notice::policy` hook and local processing. The manager then runs the :bro:see:`Notice::policy` hook and

View file

@ -1,7 +1,7 @@
========== ===================
Signatures Signature Framework
========== ===================
.. rst-class:: opening .. rst-class:: opening
@ -46,7 +46,7 @@ signature's event statement (``Found root!``), and data is the last
piece of payload which triggered the pattern match. piece of payload which triggered the pattern match.
To turn such :bro:id:`signature_match` events into actual alarms, you can To turn such :bro:id:`signature_match` events into actual alarms, you can
load Bro's :doc:`/scripts/base/frameworks/signatures/main` script. load Bro's :doc:`/scripts/base/frameworks/signatures/main.bro` script.
This script contains a default event handler that raises This script contains a default event handler that raises
:bro:enum:`Signatures::Sensitive_Signature` :doc:`Notices <notice>` :bro:enum:`Signatures::Sensitive_Signature` :doc:`Notices <notice>`
(as well as others; see the beginning of the script). (as well as others; see the beginning of the script).
@ -64,8 +64,8 @@ expect that signature file in the same directory as the Bro script. The
default extension of the file name is ``.sig``, and Bro appends that default extension of the file name is ``.sig``, and Bro appends that
automatically when necessary. automatically when necessary.
Signature language Signature Language for Network Traffic
================== ======================================
Let's look at the format of a signature more closely. Each individual Let's look at the format of a signature more closely. Each individual
signature has the format ``signature <id> { <attributes> }``. ``<id>`` signature has the format ``signature <id> { <attributes> }``. ``<id>``
@ -286,6 +286,44 @@ two actions defined:
connection (``"http"``, ``"ftp"``, etc.). This is used by Bro's connection (``"http"``, ``"ftp"``, etc.). This is used by Bro's
dynamic protocol detection to activate analyzers on the fly. dynamic protocol detection to activate analyzers on the fly.
Signature Language for File Content
===================================
The signature framework can also be used to identify MIME types of files
irrespective of the network protocol/connection over which the file is
transferred. A special type of signature can be written for this
purpose and will be used automatically by the :doc:`Files Framework
<file-analysis>` or by Bro scripts that use the :bro:see:`file_magic`
built-in function.
Conditions
----------
File signatures use a single type of content condition in the form of a
regular expression:
``file-magic /<regular expression>/``
This is analogous to the ``payload`` content condition for the network
traffic signature language described above. The difference is that
``payload`` signatures are applied to payloads of network connections,
but ``file-magic`` can be applied to arbitrary data; it does not
have to be tied to a network protocol or connection.
Actions
-------
Upon matching a chunk of data, file signatures use the following action
to get information about that data's MIME type:
``file-mime <string> [, <integer>]``
The arguments include the MIME type string associated with the file
magic regular expression and an optional "strength" as a signed integer.
Since multiple file magic signatures may match against a given chunk of
data, the strength value may be used to help choose a "winner". Higher
values are considered stronger.
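Putting the two together, a hypothetical file signature (the name, pattern, MIME
type, and strength below are purely illustrative) might look like::

    signature file-magic-example {
        file-magic /^EXAMPLEDATA/
        file-mime "application/x-example", 50
    }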
Things to keep in mind when writing signatures Things to keep in mind when writing signatures
============================================== ==============================================

View file

@ -0,0 +1,12 @@
event bro_init()
{
local filter: Log::Filter =
[
$name="sqlite",
$path="/var/db/conn",
$config=table(["tablename"] = "conn"),
$writer=Log::WRITER_SQLITE
];
Log::add_filter(Conn::LOG, filter);
}

View file

@ -0,0 +1,40 @@
@load frameworks/files/hash-all-files
type Val: record {
hash: string;
description: string;
};
event line(description: Input::EventDescription, tpe: Input::Event, r: Val)
{
print fmt("malware-hit with hash %s, description %s", r$hash, r$description);
}
global malware_source = "/var/db/malware";
event file_hash(f: fa_file, kind: string, hash: string)
{
# check all sha1 hashes
if ( kind=="sha1" )
{
Input::add_event(
[
$source=malware_source,
$name=hash,
$fields=Val,
$ev=line,
$want_record=T,
$config=table(
["query"] = fmt("select * from malware_hashes where hash='%s';", hash)
),
$reader=Input::READER_SQLITE
]);
}
}
event Input::end_of_data(name: string, source:string)
{
if ( source == malware_source )
Input::remove(name);
}

View file

@ -0,0 +1,35 @@
type Idx: record {
host: addr;
};
type Val: record {
users: set[string];
};
global hostslist: table[addr] of Val = table();
event bro_init()
{
Input::add_table([$source="/var/db/hosts",
$name="hosts",
$idx=Idx,
$val=Val,
$destination=hostslist,
$reader=Input::READER_SQLITE,
$config=table(["query"] = "select * from machines_to_users;")
]);
Input::remove("hosts");
}
event Input::end_of_data(name: string, source: string)
{
if ( name != "hosts" )
return;
# now all data is in the table
print "Hosts list has been successfully imported";
# List the users of one host.
print hostslist[192.168.17.1]$users;
}

View file

@ -0,0 +1,36 @@
@load base/frameworks/sumstats
event connection_established(c: connection)
{
# Make an observation!
# This observation is global so the key is empty.
# Each established connection counts as one so the observation is always 1.
SumStats::observe("conn established",
SumStats::Key(),
SumStats::Observation($num=1));
}
event bro_init()
{
# Create the reducer.
# The reducer attaches to the "conn established" observation stream
# and uses the summing calculation on the observations.
local r1 = SumStats::Reducer($stream="conn established",
$apply=set(SumStats::SUM));
# Create the final sumstat.
# We give it an arbitrary name and make it collect data every minute.
# The reducer is then attached and a $epoch_result callback is given
# to finally do something with the data collected.
SumStats::create([$name = "counting connections",
$epoch = 1min,
$reducers = set(r1),
$epoch_result(ts: time, key: SumStats::Key, result: SumStats::Result) =
{
# This is the body of the callback that is called when a single
# result has been collected. We are just printing the total number
# of connections that were seen. The $sum field is provided as a
# double type value so we need to use %f as the format specifier.
print fmt("Number of connections established: %.0f", result["conn established"]$sum);
}]);
}

View file

@ -0,0 +1,45 @@
@load base/frameworks/sumstats
# We use the connection_attempt event to limit our observations to those
# which were attempted and not successful.
event connection_attempt(c: connection)
{
# Make an observation!
# This observation is about the host attempting the connection.
# Each connection attempt counts as one so the observation is always 1.
SumStats::observe("conn attempted",
SumStats::Key($host=c$id$orig_h),
SumStats::Observation($num=1));
}
event bro_init()
{
# Create the reducer.
# The reducer attaches to the "conn attempted" observation stream
# and uses the summing calculation on the observations. Keep
# in mind that there will be one result per key (connection originator).
local r1 = SumStats::Reducer($stream="conn attempted",
$apply=set(SumStats::SUM));
# Create the final sumstat.
# This is slightly different from the last example since we're providing
# a callback to calculate a value to check against the threshold with
# $threshold_val. The actual threshold itself is provided with $threshold.
# Another callback is provided for when a key crosses the threshold.
SumStats::create([$name = "finding scanners",
$epoch = 5min,
$reducers = set(r1),
# Provide a threshold.
$threshold = 5.0,
# Provide a callback to calculate a value from the result
# to check against the threshold field.
$threshold_val(key: SumStats::Key, result: SumStats::Result) =
{
return result["conn attempted"]$sum;
},
# Provide a callback for when a key crosses the threshold.
$threshold_crossed(key: SumStats::Key, result: SumStats::Result) =
{
print fmt("%s attempted %.0f or more connections", key$host, result["conn attempted"]$sum);
}]);
}

105
doc/frameworks/sumstats.rst Normal file
View file

@ -0,0 +1,105 @@
.. _sumstats-framework:
==================
Summary Statistics
==================
.. rst-class:: opening
Measuring aspects of network traffic is an extremely common task in Bro.
Bro provides data structures which make this very easy in simple
cases, such as processing a size-limited trace file. In
real-world deployments, though, difficulties arise from
clusterization (many processes sniffing traffic) and unbounded data sets
(traffic never stops). The Summary Statistics (otherwise referred to as
SumStats) framework aims to define a mechanism for consuming unbounded
data sets and making them measurable in practice on large clustered and
non-clustered Bro deployments.
.. contents::
Overview
========
The SumStats processing flow is broken into three pieces: observations, where
some aspect of an event is observed and fed into the SumStats framework;
reducers, where observations are collected and measured, typically by taking
some sort of summary statistic measurement like average or variance (among
others); and sumstats, where reducers are given an epoch (time interval) over
which their measurements are performed, along with callbacks for monitoring
thresholds or viewing the collected and measured data.
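In script terms, these three pieces map onto :bro:see:`SumStats::observe`,
:bro:see:`SumStats::Reducer`, and :bro:see:`SumStats::create`. The following is a
condensed sketch of the first example included further below:

.. code:: bro

    @load base/frameworks/sumstats

    event connection_established(c: connection)
        {
        # Observation: one data point on the "conn established" stream.
        SumStats::observe("conn established", SumStats::Key(),
                          SumStats::Observation($num=1));
        }

    event bro_init()
        {
        # Reducer: sum all observations on that stream.
        local r1 = SumStats::Reducer($stream="conn established",
                                     $apply=set(SumStats::SUM));

        # Sumstat: collect the reducer over one-minute epochs and report the sum.
        SumStats::create([$name="counting connections",
                          $epoch=1min,
                          $reducers=set(r1),
                          $epoch_result(ts: time, key: SumStats::Key, result: SumStats::Result) =
                              {
                              print fmt("Number of connections established: %.0f",
                                        result["conn established"]$sum);
                              }]);
        }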
Terminology
===========
Observation
A single point of data. Observations have a few components of their
own. They are part of an arbitrarily named observation stream, they
have a key that is something the observation is about, and the actual
observation itself.
Reducer
Calculations are applied to an observation stream here to reduce the
full unbounded set of observations down to a smaller representation.
Results are collected within each reducer per-key so care must be
taken to keep the total number of keys tracked down to a reasonable
level.
Sumstat
The final definition of a Sumstat where one or more reducers is
collected over an interval, also known as an epoch. Thresholding can
be applied here along with a callback in the event that a threshold is
crossed. Additionally, a callback can be provided to access each
result (per-key) at the end of each epoch.
Examples
========
These examples may seem very simple to an experienced Bro script developer and
they're intended to look that way. Keep in mind that these scripts will work
on small single process Bro instances as well as large many-worker clusters.
The complications from dealing with flow based load balancing can be ignored
by developers writing scripts that use Sumstats due to its built-in cluster
transparency.
Printing the number of connections
----------------------------------
Sumstats provides a simple way of approaching the problem of trying to count
the number of connections over a given time interval. Here is a script with
inline documentation that does this with the Sumstats framework:
.. btest-include:: ${DOC_ROOT}/frameworks/sumstats-countconns.bro
When run on a sample PCAP file from the Bro test suite, the following output
is created:
.. btest:: sumstats-countconns
@TEST-EXEC: btest-rst-cmd bro -r ${TRACES}/workshop_2011_browse.trace ${DOC_ROOT}/frameworks/sumstats-countconns.bro
Toy scan detection
------------------
Taking the previous example even further, we can implement a simple detection
to demonstrate the thresholding functionality. This example is a toy to
demonstrate how thresholding works in Sumstats and is not meant to be a
real-world functional example; that is left to the
:doc:`/scripts/policy/misc/scan.bro` script that is included with Bro.
.. btest-include:: ${DOC_ROOT}/frameworks/sumstats-toy-scan.bro
Let's see if there are any hosts that crossed the threshold in a PCAP file
containing a host running nmap:
.. btest:: sumstats-toy-scan
@TEST-EXEC: btest-rst-cmd bro -r ${TRACES}/nmap-vsn.trace ${DOC_ROOT}/frameworks/sumstats-toy-scan.bro
It seems the host running nmap was detected!

View file

@ -0,0 +1,24 @@
global mime_to_ext: table[string] of string = {
["application/x-dosexec"] = "exe",
["text/plain"] = "txt",
["image/jpeg"] = "jpg",
["image/png"] = "png",
["text/html"] = "html",
};
event file_new(f: fa_file)
{
if ( f$source != "HTTP" )
return;
if ( ! f?$mime_type )
return;
if ( f$mime_type !in mime_to_ext )
return;
local fname = fmt("%s-%s.%s", f$source, f$id, mime_to_ext[f$mime_type]);
print fmt("Extracting file %s", fname);
Files::add_analyzer(f, Files::ANALYZER_EXTRACT, [$extract_filename=fname]);
}

View file

@ -0,0 +1,5 @@
event http_reply(c: connection, version: string, code: count, reason: string)
{
if ( /^[hH][tT][tT][pP]:/ in c$http$uri && c$http$status_code == 200 )
print fmt("A local server is acting as an open proxy: %s", c$id$resp_h);
}

View file

@ -0,0 +1,26 @@
module HTTP;
export {
global success_status_codes: set[count] = {
200,
201,
202,
203,
204,
205,
206,
207,
208,
226,
304
};
}
event http_reply(c: connection, version: string, code: count, reason: string)
{
if ( /^[hH][tT][tT][pP]:/ in c$http$uri &&
c$http$status_code in HTTP::success_status_codes )
print fmt("A local server is acting as an open proxy: %s", c$id$resp_h);
}

View file

@ -0,0 +1,31 @@
@load base/utils/site
redef Site::local_nets += { 192.168.0.0/16 };
module HTTP;
export {
global success_status_codes: set[count] = {
200,
201,
202,
203,
204,
205,
206,
207,
208,
226,
304
};
}
event http_reply(c: connection, version: string, code: count, reason: string)
{
if ( Site::is_local_addr(c$id$resp_h) &&
/^[hH][tT][tT][pP]:/ in c$http$uri &&
c$http$status_code in HTTP::success_status_codes )
print fmt("A local server is acting as an open proxy: %s", c$id$resp_h);
}

View file

@ -0,0 +1,40 @@
@load base/utils/site
@load base/frameworks/notice
redef Site::local_nets += { 192.168.0.0/16 };
module HTTP;
export {
redef enum Notice::Type += {
Open_Proxy
};
global success_status_codes: set[count] = {
200,
201,
202,
203,
204,
205,
206,
207,
208,
226,
304
};
}
event http_reply(c: connection, version: string, code: count, reason: string)
{
if ( Site::is_local_addr(c$id$resp_h) &&
/^[hH][tT][tT][pP]:/ in c$http$uri &&
c$http$status_code in HTTP::success_status_codes )
NOTICE([$note=HTTP::Open_Proxy,
$msg=fmt("A local server is acting as an open proxy: %s",
c$id$resp_h),
$conn=c,
$identifier=cat(c$id$resp_h),
$suppress_for=1day]);
}

162
doc/httpmonitor/index.rst Normal file
View file

@ -0,0 +1,162 @@
.. _http-monitor:
================================
Monitoring HTTP Traffic with Bro
================================
Bro can be used to log the entire HTTP traffic from your network to the
http.log file. This file can then be used for analysis and auditing
purposes.
In the sections below we briefly explain the structure of the http.log
file, then we show you how to perform basic HTTP traffic monitoring and
analysis tasks with Bro. Some of these ideas and techniques can later be
applied to monitor different protocols in a similar way.
----------------------------
Introduction to the HTTP log
----------------------------
The http.log file contains a summary of all HTTP requests and responses
sent over a Bro-monitored network. Here are the first few columns of
``http.log``::
# ts uid orig_h orig_p resp_h resp_p
1311627961.8 HSH4uV8KVJg 192.168.1.100 52303 192.150.187.43 80
Every single line in this log starts with a timestamp, a unique
connection identifier (UID), and a connection 4-tuple (originator
host/port and responder host/port). The UID can be used to identify all
logged activity (possibly across multiple log files) associated with a
given connection 4-tuple over its lifetime.
The remaining columns detail the activity that's occurring. For
example, the columns on the line below (shortened for brevity) show a
request to the root of Bro website::
# method host uri referrer user_agent
GET bro.org / - <...>Chrome/12.0.742.122<...>
Network administrators and security engineers, for instance, can use the
information in this log to understand the HTTP activity on the network
and troubleshoot network problems or search for anomalous activities. We must
stress that there is no single right way to perform an analysis. It will
depend on the expertise of the person performing the analysis and the
specific details of the task.
For more information about how to handle the HTTP protocol in Bro,
including a complete list of the fields available in http.log, go to
Bro's :doc:`HTTP script reference
</scripts/base/protocols/http/main.bro>`.
------------------------
Detecting a Proxy Server
------------------------
A proxy server is a device on your network configured to request a
service on behalf of a third system; one of the most common examples is
a Web proxy server. A client without Internet access connects to the
proxy and requests a web page, the proxy sends the request to the web
server, which receives the response, and passes it to the original
client.
Proxies were conceived to help manage a network and provide better
encapsulation. Proxies by themselves are not a security threat, but a
misconfigured or unauthorized proxy can allow others, either inside or
outside the network, to access any web site and even conduct malicious
activities anonymously using the network's resources.
What Proxy Server traffic looks like
-------------------------------------
In general, when a client starts talking with a proxy server, the
traffic consists of two parts: (i) a GET request, and (ii) an HTTP
reply::
Request: GET http://www.bro.org/ HTTP/1.1
Reply: HTTP/1.0 200 OK
This will differ from traffic between a client and a normal Web server
because GET requests sent directly to a web server should not include
"http:" in the URI. We can use this to identify a proxy server.
We can write a basic script in Bro to handle the http_reply event and
detect a reply for a ``GET http://`` request.
.. btest-include:: ${DOC_ROOT}/httpmonitor/http_proxy_01.bro
.. btest:: http_proxy_01
@TEST-EXEC: btest-rst-cmd bro -r ${TRACES}/http/proxy.pcap ${DOC_ROOT}/httpmonitor/http_proxy_01.bro
Basically, the script is checking for a "200 OK" status code on a reply
for a request that includes "http:" (case insensitive). In reality, the
HTTP protocol defines several success status codes other than 200, so we
will extend our basic script to also consider the additional codes.
.. btest-include:: ${DOC_ROOT}/httpmonitor/http_proxy_02.bro
.. btest:: http_proxy_02
@TEST-EXEC: btest-rst-cmd bro -r ${TRACES}/http/proxy.pcap ${DOC_ROOT}/httpmonitor/http_proxy_02.bro
Next, we will make sure that the responding proxy is part of our local
network.
.. btest-include:: ${DOC_ROOT}/httpmonitor/http_proxy_03.bro
.. btest:: http_proxy_03
@TEST-EXEC: btest-rst-cmd bro -r ${TRACES}/http/proxy.pcap ${DOC_ROOT}/httpmonitor/http_proxy_03.bro
.. note::
The redefinition of :bro:see:`Site::local_nets` is only done inside
this script to make it a self-contained example. It's typically
redefined somewhere else.
Finally, our goal should be to generate an alert when a proxy has been
detected instead of printing a message on the console output. For that,
we will tag the traffic accordingly and define a new ``Open_Proxy``
``Notice`` type to alert on all tagged communications. Once a
notification has been fired, we will further suppress it for one day.
Below is the complete script.
.. btest-include:: ${DOC_ROOT}/httpmonitor/http_proxy_04.bro
.. btest:: http_proxy_04
@TEST-EXEC: btest-rst-cmd bro -r ${TRACES}/http/proxy.pcap ${DOC_ROOT}/httpmonitor/http_proxy_04.bro
@TEST-EXEC: btest-rst-include notice.log
Note that this script only logs the presence of the proxy to
``notice.log``, but if an additional email is desired (and email
functionality is enabled), then that's done simply by redefining
:bro:see:`Notice::emailed_types` to add the ``Open_Proxy`` notice type
to it.
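For example, a minimal sketch of such a redefinition, added to ``local.bro``
(this assumes the Notice framework's mail settings, e.g.
:bro:see:`Notice::mail_dest`, are configured elsewhere):

.. code:: bro

    @load base/frameworks/notice

    # Sketch: also send email for the Open_Proxy notice defined above.
    # Assumes mail delivery is already configured for the Notice framework.
    redef Notice::emailed_types += { HTTP::Open_Proxy };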
----------------
Inspecting Files
----------------
Files are often transmitted on regular HTTP conversations between a
client and a server. Most of the time these files are harmless, just
images and some other multimedia content, but there are also types of
files, especially executable files, that can damage your system. We can
instruct Bro to create a copy of all files of certain types that it sees
using the :ref:`File Analysis Framework <file-analysis-framework>`
(introduced with Bro 2.2):
.. btest-include:: ${DOC_ROOT}/httpmonitor/file_extraction.bro
.. btest:: file_extraction
@TEST-EXEC: btest-rst-cmd -n 5 bro -r ${TRACES}/http/bro.org.pcap ${DOC_ROOT}/httpmonitor/file_extraction.bro
Here, the ``mime_to_ext`` table serves two purposes. It defines which
mime types to extract and also the file suffix of the extracted files.
Extracted files are written to a new ``extract_files`` subdirectory.
Also note that the first conditional in the :bro:see:`file_new` event
handler can be removed to make this behavior generic to other protocols
besides HTTP.
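As a rough sketch of the same mechanism (not the full ``file_extraction.bro``
script included above), the following handler extracts only PNG images seen
over HTTP; it assumes the Bro 2.2 file-analysis field names ``f$source`` and
``f$mime_type``:

.. code:: bro

    @load base/files/extract

    global extract_count = 0;

    event file_new(f: fa_file)
        {
        # Restrict this sketch to files transferred over HTTP.
        if ( f$source != "HTTP" )
            return;

        # Extract PNG images only, writing each one with a .png suffix.
        if ( f?$mime_type && f$mime_type == "image/png" )
            {
            ++extract_count;
            Files::add_analyzer(f, Files::ANALYZER_EXTRACT,
                                [$extract_filename=fmt("extract-%d.png",
                                                       extract_count)]);
            }
        }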

View file

@ -1,91 +1,51 @@
.. Bro documentation master file .. Bro documentation master file
================= ==========
Bro Documentation Bro Manual
==========
Introduction Section
====================
.. toctree::
:maxdepth: 2
intro/index.rst
cluster/index.rst
install/index.rst
quickstart/index.rst
..
.. _using-bro:
Using Bro Section
================= =================
Guides .. toctree::
------ :maxdepth: 2
logs/index.rst
httpmonitor/index.rst
broids/index.rst
mimestats/index.rst
..
Reference Section
=================
.. toctree:: .. toctree::
:maxdepth: 1 :maxdepth: 2
INSTALL scripting/index.rst
upgrade frameworks/index.rst
quickstart script-reference/index.rst
faq components/index.rst
reporting-problems
Frameworks ..
----------
.. toctree::
:maxdepth: 1
notice
logging
input
file-analysis
cluster
signatures
How-Tos
-------
.. toctree::
:maxdepth: 1
geoip
Script Reference
----------------
.. toctree::
:maxdepth: 1
scripts/packages
scripts/index
scripts/builtins
scripts/proto-analyzers
Other Bro Components
--------------------
The following are snapshots of documentation for components that come
with this version of Bro (|version|). Since they can also be used
independently, see the `download page
<http://bro.org/download/index.html>`_ for documentation of any
current, independent component releases.
.. toctree::
:maxdepth: 1
BinPAC - A protocol parser generator <components/binpac/README>
Broccoli - The Bro Client Communication Library (README) <components/broccoli/README>
Broccoli - User Manual <components/broccoli/broccoli-manual>
Broccoli Python Bindings <components/broccoli-python/README>
Broccoli Ruby Bindings <components/broccoli-ruby/README>
BroControl - Interactive Bro management shell <components/broctl/README>
Bro-Aux - Small auxiliary tools for Bro <components/bro-aux/README>
BTest - A unit testing framework <components/btest/README>
Capstats - Command-line packet statistic tool <components/capstats/README>
PySubnetTree - Python module for CIDR lookups<components/pysubnettree/README>
trace-summary - Script for generating break-downs of network traffic <components/trace-summary/README>
The `Broccoli API Reference <broccoli-api/index.html>`_ may also be of
interest.
Other Indices and References
----------------------------
* :ref:`General Index <genindex>` * :ref:`General Index <genindex>`
* `Notice Index <bro-noticeindex.html>`_
* :ref:`search` * :ref:`search`
Internal References
-------------------
.. toctree::
:maxdepth: 1
scripts/internal

View file

@ -0,0 +1 @@
../../aux/binpac/CHANGES

View file

@ -0,0 +1 @@
../../aux/bro-aux/CHANGES

1
doc/install/CHANGES-bro.txt Symbolic link
View file

@ -0,0 +1 @@
../../CHANGES

View file

@ -0,0 +1 @@
../../aux/broccoli/bindings/broccoli-python/CHANGES

View file

@ -0,0 +1 @@
../../aux/broccoli/bindings/broccoli-ruby/CHANGES

View file

@ -0,0 +1 @@
../../aux/broccoli/CHANGES

View file

@ -0,0 +1 @@
../../aux/broctl/CHANGES

View file

@ -0,0 +1 @@
../../aux/btest/CHANGES

View file

@ -0,0 +1 @@
../../aux/broctl/aux/capstats/CHANGES

View file

@ -0,0 +1 @@
../../aux/broctl/aux/pysubnettree/CHANGES

View file

@ -0,0 +1 @@
../../aux/broctl/aux/trace-summary/CHANGES

1
doc/install/NEWS.rst Symbolic link
View file

@ -0,0 +1 @@
../../NEWS

75
doc/install/changes.rst Normal file
View file

@ -0,0 +1,75 @@
========================
Detailed Version History
========================
.. contents::
---
Bro
---
.. literalinclude:: CHANGES-bro.txt
----------
BroControl
----------
.. literalinclude:: CHANGES-broctl.txt
--------
Broccoli
--------
.. literalinclude:: CHANGES-broccoli.txt
---------------
Broccoli Python
---------------
.. literalinclude:: CHANGES-broccoli-python.txt
-------------
Broccoli Ruby
-------------
.. literalinclude:: CHANGES-broccoli-ruby.txt
--------
Capstats
--------
.. literalinclude:: CHANGES-capstats.txt
-------------
Trace-Summary
-------------
.. literalinclude:: CHANGES-trace-summary.txt
------
BinPAC
------
.. literalinclude:: CHANGES-binpac.txt
-------
Bro-Aux
-------
.. literalinclude:: CHANGES-bro-aux.txt
-----
BTest
-----
.. literalinclude:: CHANGES-btest.txt
------------
PySubnetTree
------------
.. literalinclude:: CHANGES-pysubnettree.txt

View file

@ -0,0 +1,47 @@
.. _upgrade-guidelines:
==============
How to Upgrade
==============
If you're doing an upgrade install (rather than a fresh install),
there are two suggested approaches: either install Bro using the same
installation prefix directory as before, or pick a new prefix and copy
local customizations over. Regardless of which approach you choose,
if you are using BroControl, then after upgrading Bro you will need to
run "broctl check" (to verify that your new configuration is OK)
and "broctl install" to complete the upgrade process.
In the following we summarize general guidelines for upgrading; see
the :ref:`release-notes` for version-specific information.
Reusing Previous Install Prefix
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
If you choose to configure and install Bro with the same prefix
directory as before, local customization and configuration to files in
``$prefix/share/bro/site`` and ``$prefix/etc`` won't be overwritten
(``$prefix`` indicating the root of where Bro was installed). Also, logs
generated at run-time won't be touched by the upgrade. Backing up local
changes before upgrading is still recommended.
After upgrading, remember to check ``$prefix/share/bro/site`` and
``$prefix/etc`` for ``.example`` files, which indicate that the
distribution's version of the file differs from the local one, and therefore,
may include local changes. Review the differences and make adjustments
as necessary. Use the new version for differences that aren't a result of
a local change.
Using a New Install Prefix
~~~~~~~~~~~~~~~~~~~~~~~~~~
To install the newer version in a different prefix directory than before,
copy local customization and configuration files from ``$prefix/share/bro/site``
and ``$prefix/etc`` to the new location (``$prefix`` indicating the root of
where Bro was originally installed). Review the files for differences
before copying and make adjustments as necessary (use the new version for
differences that aren't a result of a local change). Of particular note,
the copied version of ``$prefix/etc/broctl.cfg`` is likely to need changes
to the ``SpoolDir`` and ``LogDir`` settings.

12
doc/install/index.rst Normal file
View file

@ -0,0 +1,12 @@
.. _installation:
============
Installation
============
.. toctree::
:maxdepth: 2
install
upgrade

211
doc/install/install.rst Normal file
View file

@ -0,0 +1,211 @@
.. _CMake: http://www.cmake.org
.. _SWIG: http://www.swig.org
.. _Xcode: https://developer.apple.com/xcode/
.. _MacPorts: http://www.macports.org
.. _Fink: http://www.finkproject.org
.. _Homebrew: http://brew.sh
.. _bro downloads page: http://bro.org/download/index.html
.. _installing-bro:
==============
Installing Bro
==============
.. contents::
Prerequisites
=============
Before installing Bro, you'll need to ensure that some dependencies
are in place.
Required Dependencies
---------------------
Bro requires the following libraries and tools to be installed
before you begin:
* Libpcap (http://www.tcpdump.org)
* OpenSSL libraries (http://www.openssl.org)
* BIND8 library
* Libz
* Bash (for BroControl)
* Python (for BroControl)
To build Bro from source, the following additional dependencies are required:
* CMake 2.6.3 or greater (http://www.cmake.org)
* Make
* C/C++ compiler
* SWIG (http://www.swig.org)
* Bison (GNU Parser Generator)
* Flex (Fast Lexical Analyzer)
* Libpcap headers (http://www.tcpdump.org)
* OpenSSL headers (http://www.openssl.org)
* zlib headers
* Perl
To install the required dependencies, you can use the commands below
(when done, make sure that ``bash`` and ``python`` are in your ``PATH``):
* RPM/RedHat-based Linux:
.. console::
sudo yum install cmake make gcc gcc-c++ flex bison libpcap-devel openssl-devel python-devel swig zlib-devel
* DEB/Debian-based Linux:
.. console::
sudo apt-get install cmake make gcc g++ flex bison libpcap-dev libssl-dev python-dev swig zlib1g-dev
* FreeBSD:
Most required dependencies should come with a minimal FreeBSD install
except for the following.
.. console::
sudo pkg_add -r bash cmake swig bison python perl
* Mac OS X:
Compiling source code on Macs requires first downloading Xcode_,
then going through its "Preferences..." -> "Downloads" menus to
install the "Command Line Tools" component.
OS X comes with all required dependencies except for CMake_ and SWIG_.
Distributions of these dependencies can likely be obtained from your
preferred Mac OS X package management system (e.g. MacPorts_, Fink_,
or Homebrew_). Specifically for MacPorts, the ``cmake``, ``swig``,
and ``swig-python`` packages provide the required dependencies.
Optional Dependencies
---------------------
Bro can make use of some optional libraries and tools if they are found at
build time:
* LibGeoIP (for geolocating IP addresses)
* sendmail (enables Bro and BroControl to send mail)
* gawk (enables all features of bro-cut)
* curl (used by a Bro script that implements active HTTP)
* gperftools (tcmalloc is used to improve memory and CPU usage)
* ipsumdump (for trace-summary; http://www.cs.ucla.edu/~kohler/ipsumdump)
* Ruby executable, library, and headers (for Broccoli Ruby bindings)
LibGeoIP is probably the most interesting and can be installed
on most platforms by following the instructions for :ref:`installing
libGeoIP and the GeoIP database
<geolocation>`.
Installing Bro
==============
Bro can be downloaded in either pre-built binary package or source
code forms.
Using Pre-Built Binary Release Packages
=======================================
See the `bro downloads page`_ for currently supported/targeted
platforms for binary releases.
* RPM
.. console::
sudo yum localinstall Bro-*.rpm
* DEB
.. console::
sudo gdebi Bro-*.deb
* MacOS Disk Image with Installer
Just open the ``Bro-*.dmg`` and then run the ``.pkg`` installer.
Everything installed by the package will go into ``/opt/bro``.
The primary install prefix for binary packages is ``/opt/bro``.
Non-MacOS packages that include BroControl also put variable/runtime
data (e.g. Bro logs) in ``/var/opt/bro``.
Installing from Source
==========================
Bro releases are bundled into source packages for convenience and are
available on the `bro downloads page`_. Alternatively, the latest
Bro development version can be obtained through git repositories
hosted at ``git.bro.org``. See our `git development documentation
<http://bro.org/development/howtos/process.html>`_ for comprehensive
information on Bro's use of git revision control, but the short story
for downloading the full source code experience for Bro via git is:
.. console::
git clone --recursive git://git.bro.org/bro
.. note:: If you choose to clone the ``bro`` repository
non-recursively for a "minimal Bro experience", be aware that
compiling it depends on several of the other submodules as well.
The typical way to build and install from source is (for more options,
run ``./configure --help``):
.. console::
./configure
make
make install
The default installation path is ``/usr/local/bro``, which would typically
require root privileges when doing the ``make install``. A different
installation path can be chosen by specifying the ``--prefix`` option.
Note that ``/usr`` and ``/opt/bro`` are the
standard prefixes for binary Bro packages to be installed, so those are
typically not good choices unless you are creating such a package.
Depending on the Bro package you downloaded, there may be auxiliary
tools and libraries available in the ``aux/`` directory. Some of them
will be automatically built and installed along with Bro. There are
``--disable-*`` options that can be given to the configure script to
turn off unwanted auxiliary projects that would otherwise be installed
automatically. Finally, use ``make install-aux`` to install some of
the other programs that are in the ``aux/bro-aux`` directory.
OpenBSD users, please see our `FAQ
<http://www.bro.org/documentation/faq.html>`_ if you are having
problems installing Bro.
Finally, if you want to build the Bro documentation (not required, because
all of the documentation for the latest Bro release is available on the
Bro web site), there are instructions in ``doc/README`` in the source
distribution.
Configure the Run-Time Environment
==================================
Just remember that you may need to adjust your ``PATH`` environment variable
according to the platform/shell/package you're using. For example:
Bourne-Shell Syntax:
.. console::
export PATH=/usr/local/bro/bin:$PATH
C-Shell Syntax:
.. console::
setenv PATH /usr/local/bro/bin:$PATH
Or substitute ``/opt/bro/bin`` instead if you installed from a binary package.

View file

@ -0,0 +1,13 @@
.. _release-notes:
=============
Release Notes
=============
.. contents::
.. include:: NEWS.rst

10
doc/install/upgrade.rst Normal file
View file

@ -0,0 +1,10 @@
=============
Upgrading Bro
=============
.. toctree::
guidelines
release-notes
changes

BIN doc/intro/architecture.png Normal file (binary image, 136 KiB, not shown)
BIN doc/intro/bro-eyes.png Normal file (binary image, 45 KiB, not shown)
BIN doc/intro/history.png Normal file (binary image, 159 KiB, not shown)

247
doc/intro/index.rst Normal file
View file

@ -0,0 +1,247 @@
============
Introduction
============
.. contents::
Overview
--------
Bro is a passive, open-source network traffic analyzer. It is
primarily a security monitor that inspects all traffic on a link in
depth for signs of suspicious activity. More generally, however,
Bro supports a wide range of traffic analysis tasks even outside of
the security domain, including performance measurements and helping
with trouble-shooting.
The most immediate benefit that a site gains from deploying Bro is an
extensive set of *log files* that record a network's activity in
high-level terms. These logs include not only a comprehensive record
of every connection seen on the wire, but also application-layer
transcripts such as all HTTP sessions with their requested
URIs, key headers, MIME types, and server responses; DNS requests with
replies; SSL certificates; key content of SMTP sessions; and much
more. By default, Bro writes all this information into well-structured
tab-separated log files suitable for post-processing with external
software. Users can, however, also choose from a set of alternative
output formats and backends to interface directly with, e.g., external
databases.
In addition to the logs, Bro comes with built-in functionality for a
range of analysis and detection tasks, including extracting files from
HTTP sessions, detecting malware by interfacing to external
registries, reporting vulnerable versions of software seen on the
network, identifying popular web applications, detecting SSH
brute-forcing, validating SSL certificate chains, and much more.
However, the key to understanding Bro lies in realizing that even
though the system comes with such powerful functionality out of the
box, fundamentally it represents a *platform* for traffic analyses
that's fully customizable and extensible: Bro provides users with a
domain-specific, Turing-complete *scripting language* for expressing
arbitrary analysis tasks. Conceptually, you can think of Bro as a
"domain-specific Python" (or Perl): just like Python, the system comes
with a large set of pre-built functionality (the "standard library"),
yet you are not limited to what the system ships with but can put Bro
to use in novel ways by writing your own code. Indeed, all of Bro's
default analyses, including all the logging, are the result of such
scripts; there's no specific analysis hard-coded into the core of the
system.
Bro runs on commodity hardware and hence provides a low-cost
alternative to expensive proprietary solutions. Despite the price tag,
however, Bro actually goes far beyond the capabilities of other
network monitoring tools, which typically remain limited to a small
set of hard-coded analysis tasks. We emphasize in particular that Bro
is *not* a classic signature-based intrusion detection system (IDS).
While it supports such standard functionality as well, Bro's scripting
language indeed facilitates a much broader spectrum of very different
approaches to finding malicious activity, including semantic misuse
detection, anomaly detection, and behavioral analysis.
A large variety of sites deploy Bro operationally for protecting their
cyberinfrastructure, including many universities, research labs,
supercomputing centers, open-science communities, and major
corporations. Bro specifically targets high-speed, high-volume network
monitoring, and an increasing number of sites are now using the system
to monitor their 10GE networks, with some already moving on to 100GE
links. Bro accommodates such high-performance settings by supporting
scalable load-balancing: large sites typically run "Bro Clusters" in
which a high-speed frontend load-balancer distributes the traffic
across an appropriate number of backend PCs, all running dedicated Bro
instances on their individual traffic slices. A central manager system
coordinates the process, synchronizing state across the backends and
providing the operators with a central management interface for
configuration and access to aggregated logs. Bro's integrated
management framework, BroControl, supports such cluster setups
out-of-the-box.
Features
--------
Bro supports a wide range of analyses through its scripting language.
Yet even without further customization it comes with a powerful set of
features.
- Deployment
* Runs on commodity hardware on standard UNIX-style systems
(including Linux, FreeBSD, and MacOS).
* Fully passive traffic analysis off a network tap or monitoring
port.
* Standard libpcap interface for capturing packets.
* Real-time and offline analysis.
* Cluster-support for large-scale deployments.
* Unified management framework for operating both standalone and
cluster setups.
* Open-source under a BSD license.
- Analysis
* Comprehensive logging of activity for offline analysis and
forensics.
* Port-independent analysis of application-layer protocols.
* Support for many application-layer protocols (including DNS,
FTP, HTTP, IRC, SMTP, SSH, SSL).
* Analysis of file content exchanged over application-layer
protocols, including MD5/SHA1 computation for fingerprinting.
* Comprehensive IPv6 support.
* Tunnel detection and analysis (including Ayiya, Teredo, GTPv1).
Bro decapsulates the tunnels and then proceeds to analyze their
content as if no tunnel was in place.
* Extensive sanity checks during protocol analysis.
* Support for IDS-style pattern matching.
- Scripting Language
* Turing-complete language for expressing arbitrary analysis
tasks.
* Event-based programming model.
* Domain-specific data types such as IP addresses (transparently
handling both IPv4 and IPv6), port numbers, and timers.
* Extensive support for tracking and managing network state over
time.
- Interfacing
* Default output to well-structured ASCII logs.
* Alternative backends for ElasticSearch and DataSeries. Further
database interfaces in preparation.
* Real-time integration of external input into analyses. Live
database input in preparation.
* External C library for exchanging Bro events with external
programs. Comes with Perl, Python, and Ruby bindings.
* Ability to trigger arbitrary external processes from within
the scripting language.
History
-------
.. figure:: history.png
:width: 600
:align: center
:alt: Bro History Timeline
:target: ../_images/history.png
Timeline of Bro's History (click to enlarge).
Bro's history goes back much further than many people realize. `Vern
Paxson <http://www.icir.org/vern>`_ designed and implemented the
initial version almost two decades ago.
Vern began work on the code in 1995 as a researcher at the `Lawrence
Berkeley National Laboratory (LBNL) <http://www.lbl.gov>`_. Berkeley
Lab began operational deployment in 1996, and the USENIX Security
Symposium published the original Bro paper in 1998 (later refined in a
subsequent `journal publication <http://www.icir.org/vern/papers/bro-CN99.pdf>`_).
In 2003, the
`National Science Foundation (NSF) <http://www.nsf.gov>`_ began
supporting research and advanced development on Bro at the
`International Computer Science Institute (ICSI)
<http://www.icsi.berkeley.edu>`_, where Vern now leads the `Networking
and Security group <http://www.icir.org>`_. Over the years, a growing
team of ICSI researchers and students kept adding novel functionality
to Bro, while LBNL continued its support with funding from the
`Department of Energy (DOE) <http://www.doe.gov>`_.
Much of Bro's capabilities originate in academic research projects,
with results often published at top-tier conferences. However, the key
to Bro's success was its ability to bridge the traditional gap between
academia and operations from early on, which provided the research
with crucial grounding to ensure that developed approaches stand up to
the challenges of the real world. Yet, with Bro's operational user
community growing over time, the research-centric development model
eventually became a bottleneck to the system's evolution: research
grants do not tend to support the more mundane parts of software
development and maintenance, even though those prove crucial for the
end-user experience. While Bro's capabilities always went beyond those
of traditional systems, a successful deployment used to require
significant technical expertise, typically with a large upfront
investment in tackling Bro's steep learning curve. In 2010, NSF set
out to address this gap by awarding ICSI a grant dedicated solely to
Bro development out of its SDCI program.
With that support in place, the `National Center for
Supercomputing Applications (NCSA) <http://www.ncsa.illinois.edu>`_
joined the team as a core partner, and the Bro Project began to
completely overhaul many of the user-visible parts of the system for
the 2.0 release. Since that version came out, Bro has experienced
tremendous growth in new deployments across a diverse range of
settings, and the Bro team is now working to build on this success by
further advancing the system's capabilities to address the challenges
of future networks.
Architecture
------------
.. figure:: architecture.png
:width: 400
:align: center
:alt: Bro Architecture
:target: ../_images/architecture.png
Bro's internal architecture.
Architecturally, Bro is layered into two major components. Its *event
engine* (or *core*) reduces the incoming packet stream into a series
of higher-level *events*. These events reflect network activity in
policy-neutral terms, i.e., they describe *what* has been seen, but
not *why*, or whether it is significant. For example, every HTTP
request on the wire turns into a corresponding ``http_request`` event
that carries with it the involved IP addresses and ports, the URI
being requested, and the HTTP version in use. The event however does
not convey any further *interpretation*, e.g., of whether that URI
corresponds to a known malware site.
Such semantics are instead derived by Bro's second main component, the
*script interpreter*, which executes a set of *event handlers* written
in Bro's custom scripting language. These scripts can express a site's
security policy, i.e., what actions to take when the monitor detects
different types of activity. More generally they can derive any
desired properties and statistics from the input traffic. Bro's
language comes with extensive domain-specific types and support
functionality; and, crucially, allows scripts to maintain state
over time, enabling them to track and correlate the evolution of what
they observe across connection and host boundaries. Bro scripts can
generate real-time alerts and also execute arbitrary external programs
on demand, e.g., to trigger an active response to an attack.
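As a small, hypothetical illustration of this model (a sketch, not one of the
scripts that ship with Bro), an event handler for the ``http_request`` event
mentioned above could simply report every request the event engine sees:

.. code:: bro

    # Sketch: print one line per HTTP request observed on the wire.
    event http_request(c: connection, method: string, original_URI: string,
                       unescaped_URI: string, version: string)
        {
        print fmt("%s -> %s: %s %s", c$id$orig_h, c$id$resp_h,
                  method, original_URI);
        }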

293
doc/logs/index.rst Normal file
View file

@ -0,0 +1,293 @@
.. _bro-logging:
===========
Bro Logging
===========
.. contents::
Once Bro has been deployed in an environment and monitoring live
traffic, it will, in its default configuration, begin to produce
human-readable ASCII logs. Each log file, produced by Bro's
:ref:`framework-logging`, is populated with organized, mostly
connection-oriented data. As the standard log files are simple ASCII
data, working with the data contained in them can be done from a
command line terminal once you have been familiarized with the types
of data that can be found in each file. In the following, we work
through the logs' general structure and then examine some standard ways
of working with them.
----------------------
Working with Log Files
----------------------
Generally, all of Bro's log files are produced by a corresponding
script that defines their individual structure. However, as each log
file flows through the Logging Framework, they share a set of
structural similarities. Without breaking into the scripting aspect of
Bro here, a bird's eye view of how the log files are produced
progresses as follows. The script's author defines the kinds of data,
such as the originating IP address or the duration of a connection,
which will make up the fields (i.e., columns) of the log file. The
author then decides what network activity should generate a single log
file entry (i.e., one line). For example, this could be a connection
having been completed or an HTTP ``GET`` request being issued by an
originator. When these behaviors are observed during operation, the
data is passed to the Logging Framework, which adds the entry
to the appropriate log file.
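To make that flow concrete, here is a small sketch built around a hypothetical
``Example`` module (the module, stream, and field names are made up for
illustration): the author declares the columns, registers the stream at
startup, and writes one entry per established connection.

.. code:: bro

    module Example;

    export {
        redef enum Log::ID += { LOG };

        ## The columns of the hypothetical example.log.
        type Info: record {
            ts:   time &log;
            orig: addr &log;
            resp: addr &log;
        };
    }

    event bro_init()
        {
        Log::create_stream(Example::LOG, [$columns=Info]);
        }

    event connection_established(c: connection)
        {
        Log::write(Example::LOG, [$ts=network_time(),
                                  $orig=c$id$orig_h,
                                  $resp=c$id$resp_h]);
        }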
As the fields of the log entries can be further customized by the
user, the Logging Framework makes use of a header block to ensure that
it remains self-describing. This header entry can be seen by running
the Unix utility ``head`` and outputting the first lines of the file:
.. btest:: using_bro
@TEST-EXEC: btest-rst-cmd bro -r $TRACES/wikipedia.trace
@TEST-EXEC: btest-rst-include -n 15 conn.log
As you can see, the header consists of lines prefixed by ``#`` and
includes information such as what separators are being used for
various types of data, what an empty field looks like and what an
unset field looks like. In this example, the default TAB separator is
being used as the delimiter between fields (``\x09`` is the tab
character in hex). It also lists the comma as the separator for set
data, the string ``(empty)`` as the indicator for an empty field and
the ``-`` character as the indicator for a field that hasn't been set.
The timestamp for when the file was created is included under
``#open``. The header then goes on to detail the fields being listed
in the file and the data types of those fields, in ``#fields`` and
``#types``, respectively. These two entries are often the two most
significant points of interest as they detail not only the field names
but the data types used. When navigating through the different log
files with tools like ``sed``, ``awk``, or ``grep``, having the field
definitions readily available saves the user some mental leg work. The
field names are also a key resource for using the :ref:`bro-cut
<bro-cut>` utility included with Bro, see below.
Next to the header follows the main content. In this example we see 7
connections with their key properties, such as originator and
responder IP addresses (note how Bro transparently handles both IPv4 and
IPv6), transport-layer ports, application-layer services (the
``service`` field is filled in as Bro determines a specific protocol to
be in use, independent of the connection's ports), payload size, and
more. See :bro:type:`Conn::Info` for a description of all fields.
In addition to ``conn.log``, Bro generates many further logs by
default, including:
``dpd.log``
A summary of protocols encountered on non-standard ports.
``dns.log``
All DNS activity.
``ftp.log``
A log of FTP session-level activity.
``files.log``
Summaries of files transferred over the network. This information
is aggregated from different protocols, including HTTP, FTP, and
SMTP.
``http.log``
A summary of all HTTP requests with their replies.
``known_certs.log``
SSL certificates seen in use.
``smtp.log``
A summary of SMTP activity.
``ssl.log``
A record of SSL sessions, including certificates being used.
``weird.log``
A log of unexpected protocol-level activity. Whenever Bro's
protocol analysis encounters a situation it would not expect
(e.g., an RFC violation) it logs it in this file. Note that in
practice, real-world networks tend to exhibit a large number of
such "crud" that is usually not worth following up on.
As you can see, some log files are specific to a particular protocol,
while others aggregate information across different types of activity.
.. _bro-cut:
Using ``bro-cut``
-----------------
The ``bro-cut`` utility can be used in place of other tools to build
terminal commands that remain flexible and accurate independent of
possible changes to the log file itself. It accomplishes this by parsing
the header in each file and allowing the user to refer to the specific
columnar data available (in contrast to tools like ``awk`` that
require the user to refer to fields by their position).
For example, the following command extracts just the given columns
from a ``conn.log``:
.. btest:: using_bro
@TEST-EXEC: btest-rst-cmd -n 10 "cat conn.log | bro-cut id.orig_h id.orig_p id.resp_h duration"
The corresponding ``awk`` command will look like this:
.. btest:: using_bro
@TEST-EXEC: btest-rst-cmd -n 10 awk \'/^[^#]/ {print \$3, \$4, \$5, \$6, \$9}\' conn.log
While the output is similar, the advantage of using ``bro-cut`` over
``awk`` is that, while ``awk`` is flexible and powerful, ``bro-cut``
was specifically designed to work with Bro's log files. Firstly, the
``bro-cut`` output includes only the log file entries, while the
``awk`` solution needs to skip the header manually. Secondly, since
``bro-cut`` uses the field descriptors to identify and extract data,
it allows for flexibility independent of the format and contents of
the log file. It's not uncommon for a Bro configuration to add extra
fields to various log files as required by the environment. In this
case, the fields in the ``awk`` command would have to be altered to
compensate for the new position whereas the ``bro-cut`` output would
not change.
.. note::
The sequence of field names given to ``bro-cut`` determines the
output order, which means you can also use ``bro-cut`` to reorder
fields. That can be helpful when piping into, e.g., ``sort``.
As you may have noticed, the command for ``bro-cut`` uses output
redirection through the ``cat`` command and the ``|`` operator. Whereas
tools like ``awk`` allow you to indicate the log file as a command
line option, ``bro-cut`` only takes input through redirection such as
``|`` and ``<``. There are a couple of ways to direct log file data
into ``bro-cut``, each dependent upon the type of log file you're
processing. A caveat of its use, however, is that the 8 lines of
header data must be present.
.. note::
``bro-cut`` provides an option ``-c`` to include a corresponding
format header in the output, which makes it possible to chain multiple
``bro-cut`` instances or perform further post-processing that
evaluates the header information.
In its default setup, Bro will rotate log files on an hourly basis,
moving the current log file into a directory with the format
``YYYY-MM-DD`` and gzip-compressing the file with a file name that
includes the log file type and time range of the file. In the case of
processing a compressed log file you simply adjust your command line
tools to use the complementary ``z*`` versions of commands such as
``cat`` (``zcat``), ``grep`` (``zgrep``), and ``head`` (``zhead``).
Working with Timestamps
-----------------------
``bro-cut`` accepts the flag ``-d`` to convert the epoch time values
in the log files to human-readable format. The following command
includes the human readable time stamp, the unique identifier, the
HTTP ``Host``, and HTTP ``URI`` as extracted from the ``http.log``
file:
.. btest:: using_bro
@TEST-EXEC: btest-rst-cmd -n 5 "bro-cut -d ts uid host uri < http.log"
Log files from multiple sources are often stored in UTC time to
allow easy correlation. Converting the timestamp from a log file to
UTC can be accomplished with the ``-u`` option:
.. btest:: using_bro
@TEST-EXEC: btest-rst-cmd -n 5 "bro-cut -u ts uid host uri < http.log"
The default time format when using the ``-d`` or ``-u`` is the
``strftime`` format string ``%Y-%m-%dT%H:%M:%S%z`` which results in a
string with year, month, day of month, followed by hour, minutes,
seconds and the timezone offset. The default format can be altered by
using the ``-D`` and ``-U`` flags, using the standard ``strftime``
syntax. For example, to format the timestamp in the US-typical "Middle
Endian" you could use a format string of: ``%d-%m-%YT%H:%M:%S%z``
.. btest:: using_bro
@TEST-EXEC: btest-rst-cmd -n 5 "bro-cut -D %d-%m-%YT%H:%M:%S%z ts uid host uri < http.log"
See ``man strftime`` for more options for the format string.
Using UIDs
----------
While Bro can do signature-based analysis, its primary focus is on
behavioral detection which alters the practice of log review from
"reactionary review" to a process a little more akin to a hunting
trip. A common progression of review includes correlating a session
across multiple log files. As a connection is processed by Bro, a
unique identifier is assigned to each session. This unique identifier
is generally included in any log file entry associated with that
connection and can be used to cross-reference different log files.
A simple example would be to cross-reference a UID seen in a
``conn.log`` file. Here, we're looking for the connection with the
largest number of bytes from the responder by redirecting the output
for ``cat conn.log`` into bro-cut to extract the UID and the
resp_bytes, then sorting that output by the resp_bytes field.
.. btest:: using_bro
@TEST-EXEC: btest-rst-cmd "cat conn.log | bro-cut uid resp_bytes | sort -nrk2 | head -5"
Taking the UID of the first of the top responses, we can now
cross-reference that with the UIDs in the ``http.log`` file.
.. btest:: using_bro
@TEST-EXEC: btest-rst-cmd "cat http.log | bro-cut uid id.resp_h method status_code host uri | grep VW0XPVINV8a"
As you can see there are two HTTP ``GET`` requests within the
session that Bro identified and logged. Given that HTTP is a stream
protocol, it can have multiple ``GET``/``POST``/etc requests in a
stream and Bro is able to extract and track that information for you,
giving you an in-depth and structured view into HTTP traffic on your
network.
-----------------------
Common Log Files
-----------------------
As a monitoring tool, Bro records a detailed view of the traffic inspected
and the events generated in a series of relevant log files. These files can
later be reviewed for monitoring, auditing and troubleshooting purposes.
In this section we present a brief explanation of the most commonly used log
files generated by Bro including links to descriptions of some of the fields
for each log type.
+-----------------+---------------------------------------+------------------------------+
| Log File | Description | Field Descriptions |
+=================+=======================================+==============================+
| http.log | Shows all HTTP requests and replies | :bro:type:`HTTP::Info` |
+-----------------+---------------------------------------+------------------------------+
| ftp.log | Records FTP activity | :bro:type:`FTP::Info` |
+-----------------+---------------------------------------+------------------------------+
| ssl.log | Records SSL sessions including | :bro:type:`SSL::Info` |
| | certificates used | |
+-----------------+---------------------------------------+------------------------------+
| known_certs.log | Includes SSL certificates used | :bro:type:`Known::CertsInfo` |
+-----------------+---------------------------------------+------------------------------+
| smtp.log | Summarizes SMTP traffic on a network | :bro:type:`SMTP::Info` |
+-----------------+---------------------------------------+------------------------------+
| dns.log | Shows all DNS activity on a network | :bro:type:`DNS::Info` |
+-----------------+---------------------------------------+------------------------------+
| conn.log | Records all connections seen by Bro | :bro:type:`Conn::Info` |
+-----------------+---------------------------------------+------------------------------+
| dpd.log | Shows network activity on | :bro:type:`DPD::Info` |
| | non-standard ports | |
+-----------------+---------------------------------------+------------------------------+
| files.log | Records information about all files | :bro:type:`Files::Info` |
| | transmitted over the network | |
+-----------------+---------------------------------------+------------------------------+
| weird.log | Records unexpected protocol-level | :bro:type:`Weird::Info` |
| | activity | |
+-----------------+---------------------------------------+------------------------------+

71
doc/mimestats/index.rst Normal file
View file

@ -0,0 +1,71 @@
.. _mime-stats:
====================
MIME Type Statistics
====================
Files are constantly transmitted over HTTP on regular networks. These
files belong to a specific category (e.g., executable, text, image)
identified by a `Multipurpose Internet Mail Extension (MIME)
<http://en.wikipedia.org/wiki/MIME>`_. Although MIME was originally
developed to identify the type of non-text attachments on email, it is
also used by web browsers to identify the type of files transmitted and
present them accordingly.
In this tutorial, we will demonstrate how to use the Sumstats Framework
to collect statistical information based on MIME types; specifically,
the total number of occurrences, size in bytes, and number of unique
hosts transmitting files over HTTP for each type. For instructions on
extracting and creating a local copy of these files, visit :ref:`this
tutorial <http-monitor>`.
------------------------------------------------
MIME Statistics with Sumstats
------------------------------------------------
When working with the :ref:`Summary Statistics Framework
<sumstats-framework>`, you need to define three different pieces: (i)
Observations, where the event is observed and fed into the framework;
(ii) Reducers, where observations are collected and measured; and (iii)
SumStats, where the main functionality is implemented.
We start by defining our observation along with a record to store
all statistical values and an observation interval. We are conducting our
observation on the :bro:see:`HTTP::log_http` event and are interested
in the MIME type, size of the file ("response_body_len"), and the
originator host ("orig_h"). We use the MIME type as our key and create
observers for the other two values.
.. btest-include:: ${DOC_ROOT}/mimestats/mimestats.bro
:lines: 6-29, 54-64
Next, we create the reducers. The first will accumulate file sizes
and the second will make sure we only store a host ID once. Below is
the partial code from a :bro:see:`bro_init` handler.
.. btest-include:: ${DOC_ROOT}/mimestats/mimestats.bro
:lines: 34-37
In our final step, we create the SumStats where we check for the
observation interval. Once it expires, we populate the record
(defined above) with all the relevant data and write it to a log.
.. btest-include:: ${DOC_ROOT}/mimestats/mimestats.bro
:lines: 38-51
After putting the three pieces together we end up with the following final code for
our script.
.. btest-include:: ${DOC_ROOT}/mimestats/mimestats.bro
.. btest:: mimestats
@TEST-EXEC: btest-rst-cmd bro -r ${TRACES}/http/bro.org.pcap ${DOC_ROOT}/mimestats/mimestats.bro
@TEST-EXEC: btest-rst-include mime_metrics.log
.. note::
The redefinition of :bro:see:`Site::local_nets` is only done inside
this script to make it a self-contained example. It's typically
redefined somewhere else.

View file

@ -0,0 +1,64 @@
@load base/utils/site
@load base/frameworks/sumstats
redef Site::local_nets += { 10.0.0.0/8 };
module MimeMetrics;
export {
redef enum Log::ID += { LOG };
type Info: record {
## Timestamp when the log line was finished and written.
ts: time &log;
## Time interval that the log line covers.
ts_delta: interval &log;
## The mime type
mtype: string &log;
## The number of unique local hosts that fetched this mime type
uniq_hosts: count &log;
## The number of hits to the mime type
hits: count &log;
## The total number of bytes received by this mime type
bytes: count &log;
};
## The frequency of logging the stats collected by this script.
const break_interval = 5mins &redef;
}
event bro_init() &priority=3
{
Log::create_stream(MimeMetrics::LOG, [$columns=Info]);
local r1: SumStats::Reducer = [$stream="mime.bytes",
$apply=set(SumStats::SUM)];
local r2: SumStats::Reducer = [$stream="mime.hits",
$apply=set(SumStats::UNIQUE)];
SumStats::create([$name="mime-metrics",
$epoch=break_interval,
$reducers=set(r1, r2),
$epoch_result(ts: time, key: SumStats::Key, result: SumStats::Result) =
{
local l: Info;
l$ts = network_time();
l$ts_delta = break_interval;
l$mtype = key$str;
l$bytes = double_to_count(floor(result["mime.bytes"]$sum));
l$hits = result["mime.hits"]$num;
l$uniq_hosts = result["mime.hits"]$unique;
Log::write(MimeMetrics::LOG, l);
}]);
}
event HTTP::log_http(rec: HTTP::Info)
{
if ( Site::is_local_addr(rec$id$orig_h) && rec?$resp_mime_types )
{
local mime_type = rec$resp_mime_types[0];
SumStats::observe("mime.bytes", [$str=mime_type],
[$num=rec$response_body_len]);
SumStats::observe("mime.hits", [$str=mime_type],
[$str=cat(rec$id$orig_h)]);
}
}

View file

@ -1,39 +1,38 @@
.. _FAQ: http://www.bro.org/documentation/faq.html
.. _quickstart:
================= =================
Quick Start Guide Quick Start Guide
================= =================
.. rst-class:: opening
The short story for getting Bro up and running in a simple configuration
for analysis of either live traffic from a network interface or a packet
capture trace file.
.. contents:: .. contents::
Installation
============
Bro works on most modern, Unix-based systems and requires no custom Bro works on most modern, Unix-based systems and requires no custom
hardware. It can be downloaded in either pre-built binary package or hardware. It can be downloaded in either pre-built binary package or
source code forms. See :doc:`Installing Bro <INSTALL>` for instructions source code forms. See :ref:`installing-bro` for instructions on how to
on how to install Bro. install Bro.
.. note:: Below, ``$PREFIX`` is used to reference the Bro installation In the examples below, ``$PREFIX`` is used to reference the Bro
root directory. installation root directory, which by default is ``/usr/local/bro`` if
you install from source.
Using BroControl Managing Bro with BroControl
================ ============================
BroControl is an interactive shell for easily operating/managing Bro BroControl is an interactive shell for easily operating/managing Bro
installations on a single system or even across multiple systems in a installations on a single system or even across multiple systems in a
traffic-monitoring cluster. traffic-monitoring cluster. This section explains how to use BroControl
to manage a stand-alone Bro installation. For instructions on how to
configure a Bro cluster, see the documentation for :doc:`BroControl
<../components/broctl/README>`.
A Minimal Starting Configuration A Minimal Starting Configuration
-------------------------------- --------------------------------
These are the basic configuration changes to make for a minimal BroControl installation These are the basic configuration changes to make for a minimal BroControl
that will manage a single Bro instance on the ``localhost``: installation that will manage a single Bro instance on the ``localhost``:
1) In ``$PREFIX/etc/node.cfg``, set the right interface to monitor. 1) In ``$PREFIX/etc/node.cfg``, set the right interface to monitor.
2) In ``$PREFIX/etc/networks.cfg``, comment out the default settings and add 2) In ``$PREFIX/etc/networks.cfg``, comment out the default settings and add
@ -68,9 +67,9 @@ policy and output the results in ``$PREFIX/logs``.
.. note:: The user starting BroControl needs permission to capture .. note:: The user starting BroControl needs permission to capture
network traffic. If you are not root, you may need to grant further network traffic. If you are not root, you may need to grant further
privileges to the account you're using; see the :doc:`FAQ <faq>`. privileges to the account you're using; see the FAQ_. Also, if it
Also, if it looks like Bro is not seeing any traffic, check out looks like Bro is not seeing any traffic, check out the FAQ entry on
the FAQ entry on checksum offloading. checksum offloading.
You can leave it running for now, but to stop this Bro instance you would do: You can leave it running for now, but to stop this Bro instance you would do:
@ -78,7 +77,8 @@ You can leave it running for now, but to stop this Bro instance you would do:
[BroControl] > stop [BroControl] > stop
We also recommend to insert the following entry into `crontab`:: We also recommend to insert the following entry into the crontab of the user
running BroControl::
0-59/5 * * * * $PREFIX/bin/broctl cron 0-59/5 * * * * $PREFIX/bin/broctl cron
@ -115,20 +115,18 @@ columns (shortened for brevity) show a request to the root of Bro website::
Some logs are worth explicit mention: Some logs are worth explicit mention:
``weird.log`` ``conn.log``
Contains unusual/exceptional activity that can indicate Contains an entry for every connection seen on the wire, with
malformed connections, traffic that doesn't conform to a particular basic properties such as time and duration, originator and
protocol, malfunctioning/misconfigured hardware, or even an attacker responder IP addresses, services and ports, payload size, and
attempting to avoid/confuse a sensor. Without context, it's hard to much more. This log provides a comprehensive record of the
judge whether this category of activity is interesting and so that is network's activity.
left up to the user to configure.
``notice.log`` ``notice.log``
Identifies specific activity that Bro recognizes as Identifies specific activity that Bro recognizes as
potentially interesting, odd, or bad. In Bro-speak, such potentially interesting, odd, or bad. In Bro-speak, such
activity is called a "notice". activity is called a "notice".
By default, ``BroControl`` regularly takes all the logs from By default, ``BroControl`` regularly takes all the logs from
``$PREFIX/logs/current`` and archives/compresses them to a directory ``$PREFIX/logs/current`` and archives/compresses them to a directory
named by date, e.g. ``$PREFIX/logs/2011-10-06``. The frequency at named by date, e.g. ``$PREFIX/logs/2011-10-06``. The frequency at
@ -162,7 +160,7 @@ changes we want to make:
attempt looks like it may have been successful, and we want email when attempt looks like it may have been successful, and we want email when
that happens, but only for certain servers. that happens, but only for certain servers.
So we've defined *what* we want to do, but need to know *where* to do it. We've defined *what* we want to do, but need to know *where* to do it.
The answer is to use a script written in the Bro programming language, so The answer is to use a script written in the Bro programming language, so
let's do a quick intro to Bro scripting. let's do a quick intro to Bro scripting.
@ -188,7 +186,7 @@ must explicitly choose if they want to load them.
The main entry point for the default analysis configuration of a standalone The main entry point for the default analysis configuration of a standalone
Bro instance managed by BroControl is the ``$PREFIX/share/bro/site/local.bro`` Bro instance managed by BroControl is the ``$PREFIX/share/bro/site/local.bro``
script. So we'll be adding to that in the following sections, but first script. We'll be adding to that in the following sections, but first
we have to figure out what to add. we have to figure out what to add.
Redefining Script Option Variables Redefining Script Option Variables
@ -204,8 +202,8 @@ A redefineable constant might seem strange, but what that really means is that
the variable's value may not change at run-time, but whose initial value can be the variable's value may not change at run-time, but whose initial value can be
modified via the ``redef`` operator at parse-time. modified via the ``redef`` operator at parse-time.
So let's continue on our path to modify the behavior for the two SSL Let's continue on our path to modify the behavior for the two SSL
and SSH notices. Looking at :doc:`scripts/base/frameworks/notice/main`, and SSH notices. Looking at :doc:`/scripts/base/frameworks/notice/main.bro`,
we see that it advertises: we see that it advertises:
.. code:: bro .. code:: bro
@ -218,7 +216,7 @@ we see that it advertises:
const ignored_types: set[Notice::Type] = {} &redef; const ignored_types: set[Notice::Type] = {} &redef;
} }
That's exactly what we want to do for the SSL notice. So add to ``local.bro``: That's exactly what we want to do for the SSL notice. Add to ``local.bro``:
.. code:: bro .. code:: bro
@ -270,14 +268,11 @@ that only takes the email action for SSH logins to a defined set of servers:
192.168.1.102, 192.168.1.102,
} &redef; } &redef;
redef Notice::policy += { hook Notice::policy(n: Notice::Info)
[$action = Notice::ACTION_EMAIL,
$pred(n: Notice::Info) =
{ {
return n$note == SSH::Login && n$id$resp_h in watched_servers; if ( n$note == SSH::SUCCESSFUL_LOGIN && n$id$resp_h in watched_servers )
add n$actions[Notice::ACTION_EMAIL];
} }
]
};
You'll just have to trust the syntax for now, but what we've done is You'll just have to trust the syntax for now, but what we've done is
first declare our own variable to hold a set of watched addresses, first declare our own variable to hold a set of watched addresses,
@ -286,9 +281,9 @@ an email on the condition that the predicate function evaluates to true, which
is whenever the notice type is an SSH login and the responding host stored is whenever the notice type is an SSH login and the responding host stored
inside the ``Info`` record's connection field is in the set of watched servers. inside the ``Info`` record's connection field is in the set of watched servers.
.. note:: record field member access is done with the '$' character .. note:: Record field member access is done with the '$' character
instead of a '.' as might be expected from other languages, in instead of a '.' as might be expected from other languages, in
order to avoid ambiguity with the builtin address type's use of '.' order to avoid ambiguity with the built-in address type's use of '.'
in IPv4 dotted decimal representations. in IPv4 dotted decimal representations.
Remember, to finalize that configuration change perform the ``check``, Remember, to finalize that configuration change perform the ``check``,
@ -302,21 +297,27 @@ tweak the most basic options. Here's some suggestions on what to explore next:
* We only looked at how to change options declared in the notice framework, * We only looked at how to change options declared in the notice framework,
there's many more options to look at in other script packages. there's many more options to look at in other script packages.
* Continue reading with :ref:`Using Bro <using-bro>` chapter which goes
into more depth on working with Bro; then look at
:ref:`writing-scripts` for learning how to start writing your own
scripts.
* Look at the scripts in ``$PREFIX/share/bro/policy`` for further ones
you may want to load; you can browse their documentation at the
:ref:`overview of script packages <script-packages>`.
* Reading the code of scripts that ship with Bro is also a great way to gain
further understanding of the language and how scripts tend to be
structured.
* Review the FAQ_.
* Continue reading below for another mini-tutorial on using Bro as a standalone
command-line utility.
Bro as a Command-Line Utility
=============================
If you prefer not to use BroControl (e.g. you don't need its automation
and management features), here's how to directly control Bro for your
analysis activities from the command line, on both live traffic and
offline traces.
Monitoring Live Traffic
-----------------------
@ -333,9 +334,9 @@ that's available.
Bro will output log files into the working directory.
.. note:: The FAQ_ entries about
capturing as an unprivileged user and checksum offloading are
particularly relevant at this point.
To use the site-specific ``local.bro`` script, just add it to the
command-line:
You might notice that a script you load from the command line uses the
``@load`` directive in the Bro language to declare dependence on other scripts.
This directive is similar to the ``#include`` of C/C++, except the semantics
are, "load this script if it hasn't already been loaded."
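As a rough sketch, a small wrapper script of your own (the file name and the particular packages below are only for illustration) could rely on exactly that behavior; loading it alongside ``local.bro`` won't pull anything in a second time:

.. code:: bro

    # my-analysis.bro (a hypothetical wrapper script)
    # Each @load is a no-op if the named script was already loaded.
    @load base/protocols/conn
    @load base/protocols/http

    event bro_init()
        {
        print "my-analysis.bro loaded";
        }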
.. note:: If one wants Bro to be able to load scripts that live outside the
default directories in Bro's installation root, the ``BROPATH`` environment
@ -420,3 +421,19 @@ are "load this script if it hasn't already been loaded".
to be searched for scripts. See the default search path by doing
``bro --help``.
Running Bro Without Installing
------------------------------
Developers who wish to run Bro directly from the ``build/``
directory (i.e., without performing ``make install``) must
first adjust ``BROPATH`` and ``BROMAGIC`` to look for scripts and
additional files inside the build directory. Sourcing either
``build/bro-path-dev.sh`` or ``build/bro-path-dev.csh``, as appropriate
for the current shell, accomplishes this and also augments your
``PATH`` so you can use the Bro binary directly::
./configure
make
source build/bro-path-dev.sh
bro <options>
@ -1,194 +0,0 @@
Reporting Problems
==================
.. rst-class:: opening
Here we summarize some steps to follow when you see Bro doing
something it shouldn't. To provide help, it is often crucial for
us to have a way of reliably reproducing the effect you're seeing.
Unfortunately, reproducing problems can be rather tricky with Bro
because more often than not, they occur only in either very rare
situations or only after Bro has been running for some time. In
particular, getting a small trace showing a specific effect can be
a real problem. In the following, we'll summarize some strategies
to this end.
Reporting Problems
------------------
Generally, when you encounter a problem with Bro, the best thing to do
is to open a new ticket in `Bro's issue tracker
<http://tracker.bro.org/>`__ and include information on how to
reproduce the issue. Ideally, your ticket should come with the
following:
* The Bro version you're using (if working directly from the git
repository, the branch and revision number).
* The output you're seeing along with a description of what you'd expect
Bro to do instead.
* A *small* trace in `libpcap format <http://www.tcpdump.org>`__
demonstrating the effect (assuming the problem doesn't happen right
at startup already).
* The exact command-line you're using to run Bro with that trace. If
you can, please try to run the Bro binary directly from the command
line rather than using BroControl.
* Any non-standard scripts you're using (but please only those really
necessary; just a small code snippet triggering the problem would
be perfect).
* If you encounter a crash, information from the core dump, such as
the stack backtrace, can be very helpful. See below for more on
this.
How Do I Get a Trace File?
--------------------------
As Bro is usually running live, coming up with a small trace file that
reproduces a problem can turn out to be quite a challenge. Often it
works best to start with a large trace that triggers the problem,
and then successively thin it out as much as possible.
To get to the initial large trace, here are a few things you can try:
* Capture a trace with `tcpdump <http://www.tcpdump.org/>`__, either
on the same interface Bro is running on, or on another host where
you can generate traffic of the kind likely triggering the problem
(e.g., if you're seeing problems with the HTTP analyzer, record some
of your Web browsing on your desktop.) When using tcpdump, don't
forget to record *complete* packets (``tcpdump -s 0 ...``). You can
reduce the amount of traffic captured by using a suitable BPF filter
(e.g., for HTTP only, try ``port 80``).
* Bro's command-line option ``-w <trace>`` records all packets it
processes into the given file. You can then later run Bro
offline on this trace and it will process the packets in the same
way as it did live. This is particularly helpful with problems that
only occur after Bro has already been running for some time. For
example, sometimes a crash may be triggered by a particular kind of
traffic only occurring rarely. Running Bro live with ``-w`` and
then, after the crash, offline on the recorded trace might, with a
little bit of luck, reproduce the problem reliably. However, be
careful with ``-w``: it can result in huge trace files, quickly
filling up your disk. (One way to mitigate the space issues is to
periodically delete the trace file by configuring
``rotate-logs.bro`` accordingly. BroControl does that for you if you
set its ``SaveTraces`` option.)
* Finally, you can try running Bro on a publicly available trace
file, such as `anonymized FTP traffic
<http://www-nrg.ee.lbl.gov/anonymized-traces.html>`__, `headers-only enterprise traffic
<http://www.icir.org/enterprise-tracing/Overview.html>`__, or
`Defcon traffic <http://cctf.shmoo.com/>`__. Some of these
particularly stress certain components of Bro (e.g., the Defcon
traces contain tons of scans).
Once you have a trace that demonstrates the effect, you will often
notice that it's pretty big, in particular if recorded from the link
you're monitoring. Therefore, the next step is to shrink its size as
much as possible. Here are a few things you can try to this end:
* Very often, a single connection is able to demonstrate the problem.
If you can identify which one it is (e.g., from one of Bro's
``*.log`` files) you can extract the connection's packets from the
trace using tcpdump by filtering for the corresponding 4-tuple of
addresses and ports:
.. console::
> tcpdump -r large.trace -w small.trace host <ip1> and port <port1> and host <ip2> and port <port2>
* If you can't reduce the problem to a connection, try to identify
either a host pair or a single host triggering it, and filter down
the trace accordingly.
* You can try to extract a smaller time slice from the trace using
`TCPslice <http://www.tcpdump.org/related.html>`__. For example, to
extract the first 100 seconds from the trace:
.. console::
> tcpslice +100 <in >out
Alternatively, tcpdump extracts the first ``n`` packets with its
option ``-c <n>``.
Getting More Information After a Crash
--------------------------------------
If Bro crashes, a *core dump* can be very helpful to nail down the
problem. Examining a core is not for the faint of heart but can reveal
extremely useful information.
First, you should configure Bro with the option ``--enable-debug`` and
recompile; this will disable all compiler optimizations and thus make
the core dump more useful (don't expect great performance with this
version though; compiling Bro without optimization has a noticeable
impact on its CPU usage). Then enable core dumps if you haven't
already (e.g., ``ulimit -c unlimited`` if you're using bash).
Once Bro has crashed, start gdb with the Bro binary and the file
containing the core dump. (Alternatively, you can also run Bro
directly inside gdb instead of working from a core file.) The first
helpful information to include with your tracker ticket is a stack
backtrace, which you get with gdb's ``bt`` command:
.. console::
> gdb bro core
[...]
> bt
If the crash occurs inside Bro's script interpreter, the next thing to
do is identifying the line of script code processed just before the
abnormal termination. Look for methods in the stack backtrace which
belong to any of the script interpreter's classes. Roughly speaking,
these are all classes with names ending in ``Expr``, ``Stmt``, or
``Val``. Then climb up the stack with ``up`` until you reach the first
of these methods. The object to which ``this`` is pointing will have a
``Location`` object, which in turn contains the file name and line
number of the corresponding piece of script code. Continuing the
example from above, here's how to get that information:
.. console::
[in gdb]
> up
> ...
> up
> print this->location->filename
> print this->location->first_line
If the crash occurs while processing input packets but you cannot
directly tell which connection is responsible (and thus not extract
its packets from the trace as suggested above), try getting the
4-tuple of the connection currently being processed from the core dump
by again examining the stack backtrace, this time looking for methods
belonging to the ``Connection`` class. That class has members
``orig_addr``/``resp_addr`` and ``orig_port``/``resp_port`` storing
(pointers to) the IP addresses and ports respectively:
.. console::
[in gdb]
> up
> ...
> up
> printf "%08x:%04x %08x:%04x\n", *this->orig_addr, this->orig_port, *this->resp_addr, this->resp_port
Note that these values are stored in `network byte order
<http://en.wikipedia.org/wiki/Endianness#Endianness_in_networking>`__
so you will need to flip the bytes around if you are on a little-endian
machine (which is why the above example prints them in hex). For
example, if an IP address prints as ``0100007f``, that's 127.0.0.1.
@ -1,55 +1,106 @@
Types and Attributes
====================
Types
-----
Every value in a Bro script has a type (see below for a list of all built-in
types). Although Bro variables have static types (meaning that their type
is fixed), their type is inferred from the value to which they are
initially assigned when the variable is declared without an explicit type
name.
Automatic conversions happen when a binary operator has operands of
different types. Automatic conversions are limited to converting between
numeric types. The numeric types are ``int``, ``count``, and ``double``
(``bool`` is not a numeric type).
When an automatic conversion occurs, values are promoted to the "highest"
type in the expression. In general, this promotion follows a simple
hierarchy: ``double`` is highest, ``int`` comes next, and ``count`` is
lowest.
The Bro scripting language supports the following built-in types.
.. bro:type:: void
An internal Bro type (i.e., "void" is not a reserved keyword in the Bro
scripting language) representing the absence of a return type for a
function.
.. bro:type:: bool
Reflects a value with one of two meanings: true or false. The two
``bool`` constants are ``T`` and ``F``.
The ``bool`` type supports the following operators: equality/inequality
(``==``, ``!=``), logical and/or (``&&``, ``||``), logical
negation (``!``), and absolute value (where ``|T|`` is 1, and ``|F|`` is 0).
.. bro:type:: int
A numeric type representing a 64-bit signed integer. An ``int`` constant
is a string of digits preceded by a ``+`` or ``-`` sign, e.g.
``-42`` or ``+5`` (the "+" sign is optional but see note about type
inferencing below). An ``int`` constant can also be written in
hexadecimal notation (in which case "0x" must be between the sign and
the hex digits), e.g. ``-0xFF`` or ``+0xabc123``.
The ``int`` type supports the following operators: arithmetic
operators (``+``, ``-``, ``*``, ``/``, ``%``), comparison operators
(``==``, ``!=``, ``<``, ``<=``, ``>``, ``>=``), assignment operators
(``=``, ``+=``, ``-=``), pre-increment (``++``), pre-decrement
(``--``), and absolute value (e.g., ``|-3|`` is 3).
When using type inferencing use care so that the
intended type is inferred, e.g. ``local size_difference = 0`` will
infer :bro:type:`count`, while ``local size_difference = +0``
will infer :bro:type:`int`.
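A brief sketch of these operators in action (the variable names are arbitrary):

.. code:: bro

    event bro_init()
        {
        local a: int = -42;
        local b = +5;       # inferred as int because of the explicit "+" sign
        local c = -0xFF;    # hexadecimal int constant
        print a % b;        # modulus of two ints
        print |c|;          # absolute value: 255
        }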
.. bro:type:: count
A numeric type representing a 64-bit unsigned integer. A ``count``
constant is a string of digits, e.g. ``1234`` or ``0``. A ``count``
can also be written in hexadecimal notation (in which case "0x" must
precede the hex digits), e.g. ``0xff`` or ``0xABC123``.
The ``count`` type supports the same operators as the :bro:type:`int`
type. A unary plus or minus applied to a ``count`` results in an ``int``.
.. bro:type:: counter
An alias to :bro:type:`count`.
.. bro:type:: double
A numeric type representing a double-precision floating-point
number. Floating-point constants are written as a string of digits
with an optional decimal point, optional scale-factor in scientific
notation, and optional ``+`` or ``-`` sign. Examples are ``-1234``,
``-1234e0``, ``3.14159``, and ``.003E-23``.
The ``double`` type supports the following operators: arithmetic
operators (``+``, ``-``, ``*``, ``/``), comparison operators
(``==``, ``!=``, ``<``, ``<=``, ``>``, ``>=``), assignment operators
(``=``, ``+=``, ``-=``), and absolute value (e.g., ``|-3.14|`` is 3.14).
When using type inferencing use care so that the
intended type is inferred, e.g. ``local size_difference = 5`` will
infer :bro:type:`count`, while ``local size_difference = 5.0``
will infer :bro:type:`double`.
.. bro:type:: time
A temporal type representing an absolute time. There is currently
no way to specify a ``time`` constant, but one can use the
:bro:id:`double_to_time`, :bro:id:`current_time`, or :bro:id:`network_time`
built-in functions to assign a value to a ``time``-typed variable.
Time values support the comparison operators (``==``, ``!=``, ``<``,
``<=``, ``>``, ``>=``). A ``time`` value can be subtracted from
another ``time`` value to produce an ``interval`` value. An ``interval``
value can be added to, or subtracted from, a ``time`` value to produce a
``time`` value. The absolute value of a ``time`` value is a ``double``
with the same numeric value.
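A small sketch tying these together (the ``bro_init`` handler is only a convenient place to run the code):

.. code:: bro

    event bro_init()
        {
        local start: time = current_time();
        local epoch: time = double_to_time(0.0);
        local age: interval = start - epoch;   # time - time yields an interval
        print age;
        print epoch + age;                     # time + interval yields a time
        }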
.. bro:type:: interval
@ -61,16 +112,44 @@ The Bro scripting language supports the following built-in types.
constant and time unit is optional. Appending the letter "s" to the
time unit in order to pluralize it is also optional (to no semantic
effect). Examples of ``interval`` constants are ``3.5 min`` and
``3.5mins``. An ``interval`` can also be negated, for example
``-12 hr`` represents "twelve hours in the past".
Intervals support addition and subtraction. Intervals also support
division (in which case the result is a ``double`` value), the
comparison operators (``==``, ``!=``, ``<``, ``<=``, ``>``, ``>=``),
and the assignment operators (``=``, ``+=``, ``-=``). Also, an
``interval`` can be multiplied or divided by an arithmetic type
(``count``, ``int``, or ``double``) to produce an ``interval`` value.
The absolute value of an ``interval`` is a ``double`` value equal to the
number of seconds in the ``interval`` (e.g., ``|-1 min|`` is 60).
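For example, a quick sketch of the operators described above:

.. code:: bro

    event bro_init()
        {
        local timeout = 2 min + 30 secs;   # interval addition
        print timeout / 2;                  # dividing by a count yields an interval
        print timeout / 30 secs;            # dividing two intervals yields a double (5.0)
        print |timeout|;                    # absolute value in seconds: 150.0
        }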
.. bro:type:: string
A type used to hold character-string values which represent text.
String constants are created by enclosing text in double quotes (")
and the backslash character (\\) introduces escape sequences (all of
the C-style escape sequences are supported).
Strings support concatenation (``+``), and assignment (``=``, ``+=``).
Strings also support the comparison operators (``==``, ``!=``, ``<``,
``<=``, ``>``, ``>=``). The number of characters in a string can be
found by enclosing the string within pipe characters (e.g., ``|"abc"|``
is 3).
The subscript operator can extract an individual character or a substring
of a string (string indexing is zero-based, but an index of
-1 refers to the last character in the string, and -2 refers to the
second-to-last character, etc.). When extracting a substring, the
starting and ending index values are separated by a colon. For example::
local orig = "0123456789";
local third_char = orig[2];
local last_char = orig[-1];
local first_three_chars = orig[0:2];
Substring searching can be performed using the "in" or "!in"
operators (e.g., "bar" in "foobar" yields true).
Note that Bro represents strings internally as a count and vector of
bytes rather than a NUL-terminated byte string (although string
@ -127,9 +206,7 @@ The Bro scripting language supports the following built-in types.
.. bro:type:: enum
A type allowing the specification of a set of related values that
have no further structure. An example declaration:
.. code:: bro
@ -137,9 +214,9 @@ The Bro scripting language supports the following built-in types.
The last comma after ``Blue`` is optional.
The only operations allowed on enumerations are equality comparisons
(``==``, ``!=``) and assignment (``=``).
Enumerations do not have associated values or ordering.
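As a small, self-contained sketch (the ``Color`` type mirrors the example mentioned above):

.. code:: bro

    type Color: enum { Red, White, Blue, };

    event bro_init()
        {
        local c: Color = Red;
        if ( c == Red )
            print "got red";
        c = Blue;   # assignment is allowed; there is no ordering to compare
        }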
.. bro:type:: port
@ -149,10 +226,15 @@ The Bro scripting language supports the following built-in types.
message code. A ``port`` constant is written as an unsigned integer
followed by one of ``/tcp``, ``/udp``, ``/icmp``, or ``/unknown``.
Ports support the comparison operators (``==``, ``!=``, ``<``, ``<=``,
``>``, ``>=``). When comparing order across transport-level protocols,
``unknown`` < ``tcp`` < ``udp`` < ``icmp``, for example ``65535/tcp``
is smaller than ``0/udp``.
Note that you can obtain the transport-level protocol type of a ``port``
with the :bro:id:`get_port_transport_proto` built-in function, and
the numeric value of a ``port`` with the :bro:id:`port_to_count`
built-in function.
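For instance, a minimal sketch using the built-in functions just mentioned:

.. code:: bro

    event bro_init()
        {
        local p = 443/tcp;
        print get_port_transport_proto(p);   # prints the transport protocol (tcp)
        print port_to_count(p);              # 443
        print 65535/tcp < 0/udp;             # T, since tcp sorts before udp
        }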
.. bro:type:: addr
@ -162,22 +244,29 @@ The Bro scripting language supports the following built-in types.
``A1.A2.A3.A4``, where Ai all lie between 0 and 255.
IPv6 address constants are written as colon-separated hexadecimal form
as described by :rfc:`2373` (including the mixed notation with embedded
IPv4 addresses as dotted-quads in the lower 32 bits), but additionally
encased in square brackets. Some examples: ``[2001:db8::1]``,
``[::ffff:192.168.1.100]``, or
``[aaaa:bbbb:cccc:dddd:eeee:ffff:1111:2222]``.
Note that IPv4-mapped IPv6 addresses (i.e., addresses with the first 80
bits zero, the next 16 bits one, and the remaining 32 bits are the IPv4
address) are treated internally as IPv4 addresses (for example,
``[::ffff:192.168.1.100]`` is equal to ``192.168.1.100``).
Hostname constants can also be used, but since a hostname can
correspond to multiple IP addresses, the type of such a variable is a
:bro:type:`set` of :bro:type:`addr` elements. For example:
.. code:: bro
local a = www.google.com;
Addresses can be compared for equality (``==``, ``!=``),
and also for ordering (``<``, ``<=``, ``>``, ``>=``). The absolute value
of an address gives the size in bits (32 for IPv4, and 128 for IPv6).
Addresses can also be masked with ``/`` to produce a :bro:type:`subnet`:
.. code:: bro
@ -186,7 +275,8 @@ The Bro scripting language supports the following built-in types.
if ( a/16 == s )
print "true";
And checked for inclusion within a :bro:type:`subnet` using ``in``
or ``!in``:
.. code:: bro
@ -195,6 +285,9 @@ The Bro scripting language supports the following built-in types.
if ( a in s )
print "true";
Note that you can check if a given ``addr`` is IPv4 or IPv6 using
the :bro:id:`is_v4_addr` and :bro:id:`is_v6_addr` built-in functions.
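A short sketch of these operations:

.. code:: bro

    event bro_init()
        {
        local a4 = 192.168.1.100;
        local a6 = [2001:db8::1];
        print is_v4_addr(a4);                 # T
        print is_v6_addr(a6);                 # T
        print |a6|;                           # 128 bits
        print [::ffff:192.168.1.100] == a4;   # T, IPv4-mapped addresses compare as IPv4
        }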
.. bro:type:: subnet
A type representing a block of IP addresses in CIDR notation. A
@ -202,6 +295,10 @@ The Bro scripting language supports the following built-in types.
slash (/) and then the network prefix size specified as a decimal
number. For example, ``192.168.0.0/16`` or ``[fe80::]/64``.
Subnets can be compared for equality (``==``, ``!=``). An
:bro:type:`addr` can be checked for inclusion in a subnet using
the "in" or "!in" operators.
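For example, a brief sketch:

.. code:: bro

    event bro_init()
        {
        local s: subnet = 192.168.0.0/16;
        if ( 192.168.1.100 in s )
            print "inside";
        if ( 10.0.0.1 !in s )
            print "outside";
        }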
.. bro:type:: any
Used to bypass strong typing. For example, a function can take an
@ -271,14 +368,14 @@ The Bro scripting language supports the following built-in types.
global t3 = MyTable([[$b=5]] = "b5", [[$b=7]] = "b7");
Accessing table elements is provided by enclosing index values within
square brackets (``[]``), for example:
.. code:: bro
print t[11];
And membership can be tested with ``in`` or ``!in``:
.. code:: bro
@ -297,17 +394,23 @@ The Bro scripting language supports the following built-in types.
for ( [a, p] in services )
...
Add or overwrite individual table elements by assignment:
.. code:: bro
t[13] = "thirteen";
Remove individual table elements with ``delete``:
.. code:: bro
delete t[13];
Nothing happens if the element with index value ``13`` isn't present in
the table.
The number of elements in a table can be obtained by placing the table
identifier between vertical pipe characters:
.. code:: bro
@ -355,27 +458,44 @@ The Bro scripting language supports the following built-in types.
global s4 = MySet([$b=1], [$b=2]);
Set membership is tested with ``in`` or ``!in``:
.. code:: bro
if ( 21/tcp in s )
...
if ( 21/tcp !in s )
...
Iterate over a set with a ``for`` loop:
.. code:: bro
local s: set[port];
for ( p in s )
...
Elements are added with ``add``:
.. code:: bro
add s[22/tcp];
Nothing happens if the element with value ``22/tcp`` was already present in
the set.
And removed with ``delete``:
.. code:: bro
delete s[21/tcp];
Nothing happens if the element with value ``21/tcp`` isn't present in
the set.
The number of elements in a set can be obtained by placing the set
identifier between vertical pipe characters:
.. code:: bro
@ -384,7 +504,8 @@ The Bro scripting language supports the following built-in types.
.. bro:type:: vector
A vector is like a :bro:type:`table`, except it's always indexed by a
:bro:type:`count` (and vector indexing is always zero-based). A vector
is declared like:
.. code:: bro
@ -411,21 +532,51 @@ The Bro scripting language supports the following built-in types.
global v2 = MyVec([$b=1], [$b=2], [$b=3]);
Accessing vector elements is provided by enclosing index values within
square brackets (``[]``), for example:
.. code:: bro
print v[2];
Iterate over a vector with a ``for`` loop:
.. code:: bro
local v: vector of string;
for ( n in v )
...
An element can be added to a vector by assigning the value (a value
that already exists at that index will be overwritten):
.. code:: bro
v[3] = "four";
The number of elements in a vector can be obtained by placing the vector
identifier between vertical pipe characters:
.. code:: bro
|v|
Vectors of integral types (``int`` or ``count``) support the pre-increment
(``++``) and pre-decrement operators (``--``), which will increment or
decrement each element in the vector.
Vectors of arithmetic types (``int``, ``count``, or ``double``) can be
operands of the arithmetic operators (``+``, ``-``, ``*``, ``/``, ``%``),
but both operands must have the same number of elements (and the modulus
operator ``%`` cannot be used if either operand is a ``vector of double``).
The resulting vector contains the result of the operation applied to each
of the elements in the operand vectors.
Vectors of bool can be operands of the logical "and" (``&&``) and logical
"or" (``||``) operators (both operands must have same number of elements).
The resulting vector of bool is the logical "and" (or logical "or") of
each element of the operand vectors.
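To make the element-wise behavior concrete, here is a small sketch:

.. code:: bro

    event bro_init()
        {
        local a = vector(1, 2, 3);
        local b = vector(10, 20, 30);
        print a + b;       # element-wise addition: 11, 22, 33
        ++a;               # increments every element of a
        print a;
        print vector(T, F) && vector(T, T);   # element-wise logical "and": T, F
        }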
.. bro:type:: record
A ``record`` is a collection of values. Each value has a field name
@ -504,9 +655,11 @@ The Bro scripting language supports the following built-in types.
.. bro:type:: file
Bro supports writing to files, but not reading from them. Files
can be opened using either the :bro:id:`open` or :bro:id:`open_for_append`
built-in functions, and closed using the :bro:id:`close` built-in
function. For example, declare, open, and write to a file
and finally close it like:
.. code:: bro
@ -515,7 +668,7 @@ The Bro scripting language supports the following built-in types.
close(f);
Writing to files like this for logging usually isn't recommended; for better
logging support see :doc:`/frameworks/logging`.
.. bro:type:: function
@ -544,8 +697,8 @@ The Bro scripting language supports the following built-in types.
Note that in the definition above, it's not necessary for us to have
done the first (forward) declaration of ``greeting`` as a function
type, but when it is, the return type and argument list (including the
name of each argument) must match exactly.
Function types don't need to have a name and can be assigned anonymously:
@ -628,7 +781,7 @@ The Bro scripting language supports the following built-in types.
.. bro:type:: hook
A hook is another flavor of function that shares characteristics of
both a :bro:type:`function` and an :bro:type:`event`. They are like
events in that many handler bodies can be defined for the same hook
identifier and the order of execution can be enforced with
:bro:attr:`&priority`. They are more like functions in the way they
@ -717,14 +870,14 @@ scripting language supports the following built-in attributes.
.. bro:attr:: &optional
Allows a record field to be missing. For example the type ``record {
a: addr; b: port &optional; }`` could be instantiated both as
singleton ``[$a=127.0.0.1]`` or pair ``[$a=127.0.0.1, $b=80/tcp]``.
.. bro:attr:: &default
Uses a default value for a record field, a function/hook/event
parameter, or container elements. For example, ``table[int] of
string &default="foo"`` would create a table that returns the
:bro:type:`string` ``"foo"`` for any non-existing index.
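A sketch of the container case, using ``count`` keys instead of ``int`` simply to keep the indices unsigned:

.. code:: bro

    global status_names: table[count] of string &default="unknown";

    event bro_init()
        {
        status_names[200] = "ok";
        print status_names[200];   # ok
        print status_names[404];   # unknown, the &default value
        }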
.. bro:attr:: &redef
@ -744,18 +897,25 @@ scripting language supports the following built-in attributes.
.. bro:attr:: &add_func
Can be applied to an identifier with &redef to specify a function to
be called any time a "redef <id> += ..." declaration is parsed. The
function takes two arguments of the same type as the identifier, the first
being the old value of the variable and the second being the new
value given after the "+=" operator in the "redef" declaration. The
return value of the function will be the actual new value of the
variable after the "redef" declaration is parsed.
.. bro:attr:: &delete_func
Same as &add_func, except for "redef" declarations that use the "-="
operator.
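As a rough sketch of how this might be wired up (the merge function below just mimics the default ``+=`` behavior for a set, purely to show where the hook runs, and the exact attribute usage is an assumption worth verifying against your Bro version):

.. code:: bro

    function merge_ports(old_val: set[port], new_val: set[port]): set[port]
        {
        # Fold the redef'd elements into the existing value.
        for ( p in new_val )
            add old_val[p];
        return old_val;
        }

    global interesting_ports: set[port] = { 80/tcp } &redef &add_func=merge_ports;

    # Later, e.g. in local.bro:
    # redef interesting_ports += { 443/tcp };   # merge_ports() decides the final value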
.. bro:attr:: &expire_func
Called right before a container element expires. The function's
first parameter is of the same type as the container and the second
parameter the same type as the container's index. The return
value is an :bro:type:`interval` indicating the amount of additional
time to wait before expiring the container element at the given time to wait before expiring the container element at the given
index (which will trigger another execution of this function). index (which will trigger another execution of this function).
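A minimal sketch, assuming a table that uses ``&create_expire`` (all names here are arbitrary):

.. code:: bro

    function host_expired(t: table[addr] of count, idx: addr): interval
        {
        print fmt("expiring entry for %s", idx);
        return 0 secs;   # expire now; a positive interval would postpone it
        }

    global seen_hosts: table[addr] of count &create_expire = 5 min &expire_func = host_expired;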
@ -779,7 +939,7 @@ scripting language supports the following built-in attributes.
.. bro:attr:: &persistent
Makes a variable persistent, i.e., its value is written to disk (by
default at shutdown time).
.. bro:attr:: &synchronized
@ -811,8 +971,9 @@ scripting language supports the following built-in attributes.
.. bro:attr:: &priority
Specifies the execution priority (as a signed integer) of a hook or
event handler. Higher values are executed before lower ones. The
default value is 0.
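For example, two handlers for the same event can be ordered explicitly:

.. code:: bro

    event bro_init() &priority=10
        {
        print "runs first";
        }

    event bro_init() &priority=-10
        {
        print "runs last";
        }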
.. bro:attr:: &group
@ -825,5 +986,15 @@ scripting language supports the following built-in attributes.
.. bro:attr:: &error_handler
Internally set on the events that are associated with the reporter
framework: :bro:id:`reporter_info`, :bro:id:`reporter_warning`, and
:bro:id:`reporter_error`. It prevents any handlers of those events
from being able to generate reporter messages that go through any of
those events (i.e., it prevents an infinite event recursion). Instead,
such nested reporter messages are output to stderr.
.. bro:attr:: &type_column
Used by the input framework. It can be used on columns of type
:bro:type:`port` and specifies the name of an additional column in
the input file which specifies the protocol of the port (tcp/udp/icmp).
@ -0,0 +1 @@
.. broxygen:file_analyzer:: *
@ -0,0 +1,14 @@
================
Script Reference
================
.. toctree::
:maxdepth: 1
notices
proto-analyzers
file-analyzers
builtins
packages
scripts
Broxygen Example Script </scripts/broxygen/example.bro>
@ -0,0 +1,8 @@
.. Not nice, but I haven't found a way to link to the notice index
.. directly from the upper-level TOC tree.
Notices
=======
See the `Bro Notice Index <../bro-noticeindex.html>`_.
@ -1,7 +1,7 @@
.. _script-packages:
Bro Package Index
=================
Bro has the following script packages (e.g. collections of related scripts in
a common directory). If the package directory contains a ``__load__.bro``
@ -10,3 +10,5 @@ script, it supports being loaded in mass as a whole directory for convenience.
Packages/scripts in the ``base/`` directory are all loaded by default, while
ones in ``policy/`` provide functionality and customization options that
users can decide for themselves whether to load.
.. broxygen:package_index:: *
@ -0,0 +1 @@
.. broxygen:proto_analyzer:: *
@ -0,0 +1,5 @@
================
Bro Script Index
================
.. broxygen:script_index:: *
@ -0,0 +1,6 @@
@load base/protocols/conn

event connection_state_remove(c: connection)
    {
    print c;
    }
@ -0,0 +1,7 @@
@load base/protocols/conn
@load base/protocols/http

event connection_state_remove(c: connection)
    {
    print c;
    }
@ -0,0 +1,22 @@
type Service: record {
    name: string;
    ports: set[port];
    rfc: count;
};

function print_service(serv: Service)
    {
    print fmt("Service: %s(RFC%d)", serv$name, serv$rfc);

    for ( p in serv$ports )
        print fmt(" port: %s", p);
    }

event bro_init()
    {
    local dns: Service = [$name="dns", $ports=set(53/udp, 53/tcp), $rfc=1035];
    local http: Service = [$name="http", $ports=set(80/tcp, 8080/tcp), $rfc=2616];

    print_service(dns);
    print_service(http);
    }
@ -0,0 +1,41 @@
type Service: record {
    name: string;
    ports: set[port];
    rfc: count;
};

type System: record {
    name: string;
    services: set[Service];
};

function print_service(serv: Service)
    {
    print fmt(" Service: %s(RFC%d)", serv$name, serv$rfc);

    for ( p in serv$ports )
        print fmt(" port: %s", p);
    }

function print_system(sys: System)
    {
    print fmt("System: %s", sys$name);

    for ( s in sys$services )
        print_service(s);
    }

event bro_init()
    {
    local server01: System;
    server01$name = "morlock";
    add server01$services[[$name="dns", $ports=set(53/udp, 53/tcp), $rfc=1035]];
    add server01$services[[$name="http", $ports=set(80/tcp, 8080/tcp), $rfc=2616]];
    print_system(server01);

    # local dns: Service = [$name="dns", $ports=set(53/udp, 53/tcp), $rfc=1035];
    # local http: Service = [$name="http", $ports=set(80/tcp, 8080/tcp), $rfc=2616];
    # print_service(dns);
    # print_service(http);
    }
@ -0,0 +1,22 @@
event bro_init()
    {
    local ssl_ports: set[port];
    local non_ssl_ports = set( 23/tcp, 80/tcp, 143/tcp, 25/tcp );

    # SSH
    add ssl_ports[22/tcp];
    # HTTPS
    add ssl_ports[443/tcp];
    # IMAPS
    add ssl_ports[993/tcp];

    # Check for SMTPS
    if ( 587/tcp !in ssl_ports )
        add ssl_ports[587/tcp];

    for ( i in ssl_ports )
        print fmt("SSL Port: %s", i);

    for ( i in non_ssl_ports )
        print fmt("Non-SSL Port: %s", i);
    }