Merge remote-tracking branch 'origin/master' into topic/vladg/file-analysis-exe-analyzer

Conflicts:
	scripts/base/init-default.bro
	src/file_analysis/analyzer/CMakeLists.txt
Vlad Grigorescu 2014-06-21 13:15:14 -04:00
commit b91b0646b8
1719 changed files with 78600 additions and 198617 deletions

.gitmodules (15 lines changed)

@@ -1,18 +1,21 @@
[submodule "aux/bro-aux"]
path = aux/bro-aux
-url = git://git.bro-ids.org/bro-aux
+url = git://git.bro.org/bro-aux
[submodule "aux/binpac"]
path = aux/binpac
-url = git://git.bro-ids.org/binpac
+url = git://git.bro.org/binpac
[submodule "aux/broccoli"]
path = aux/broccoli
-url = git://git.bro-ids.org/broccoli
+url = git://git.bro.org/broccoli
[submodule "aux/broctl"]
path = aux/broctl
-url = git://git.bro-ids.org/broctl
+url = git://git.bro.org/broctl
[submodule "aux/btest"]
path = aux/btest
-url = git://git.bro-ids.org/btest
+url = git://git.bro.org/btest
[submodule "cmake"]
path = cmake
-url = git://git.bro-ids.org/cmake
+url = git://git.bro.org/cmake
+[submodule "src/3rdparty"]
+path = src/3rdparty
+url = git://git.bro.org/bro-3rdparty
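The submodule URLs move from git.bro-ids.org to git.bro.org, and a new src/3rdparty module is added. For an existing clone that predates this change, picking up the new URLs and the added module is typically a re-sync plus an update; a sketch (adjust to your own checkout):

    # Re-read .gitmodules so the git.bro.org URLs replace the old git.bro-ids.org ones,
    # then fetch any submodules added by this merge, such as src/3rdparty.
    git submodule sync
    git submodule update --init --recursive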

CHANGES (11937 lines changed)

File diff suppressed because it is too large.

CMakeLists.txt

@@ -10,24 +10,18 @@ if (NOT BRO_SCRIPT_INSTALL_PATH)
# set the default Bro script installation path (user did not specify one)
set(BRO_SCRIPT_INSTALL_PATH ${BRO_ROOT_DIR}/share/bro)
endif ()
-set(BRO_SCRIPT_SOURCE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/scripts)
# sanitize the Bro script install directory into an absolute path
# (CMake is confused by ~ as a representation of home directory)
get_filename_component(BRO_SCRIPT_INSTALL_PATH ${BRO_SCRIPT_INSTALL_PATH}
ABSOLUTE)
-set(BRO_MAGIC_INSTALL_PATH ${BRO_ROOT_DIR}/share/bro/magic)
-set(BRO_MAGIC_SOURCE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/magic)
configure_file(bro-path-dev.in ${CMAKE_CURRENT_BINARY_DIR}/bro-path-dev)
file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/bro-path-dev.sh
"export BROPATH=`${CMAKE_CURRENT_BINARY_DIR}/bro-path-dev`\n"
-"export BROMAGIC=\"${BRO_MAGIC_SOURCE_PATH}\"\n"
"export PATH=\"${CMAKE_CURRENT_BINARY_DIR}/src\":$PATH\n")
file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/bro-path-dev.csh
"setenv BROPATH `${CMAKE_CURRENT_BINARY_DIR}/bro-path-dev`\n"
-"setenv BROMAGIC \"${BRO_MAGIC_SOURCE_PATH}\"\n"
"setenv PATH \"${CMAKE_CURRENT_BINARY_DIR}/src\":$PATH\n")
file(STRINGS "${CMAKE_CURRENT_SOURCE_DIR}/VERSION" VERSION LIMIT_COUNT 1)
@@ -58,7 +52,6 @@ FindRequiredPackage(BISON)
FindRequiredPackage(PCAP)
FindRequiredPackage(OpenSSL)
FindRequiredPackage(BIND)
-FindRequiredPackage(LibMagic)
FindRequiredPackage(ZLIB)
if (NOT BinPAC_ROOT_DIR AND
@@ -67,6 +60,10 @@ if (NOT BinPAC_ROOT_DIR AND
endif ()
FindRequiredPackage(BinPAC)
+if (ENABLE_JEMALLOC)
+find_package(JeMalloc)
+endif ()
if (MISSING_PREREQS)
foreach (prereq ${MISSING_PREREQ_DESCS})
message(SEND_ERROR ${prereq})
@@ -74,19 +71,13 @@ if (MISSING_PREREQS)
message(FATAL_ERROR "Configuration aborted due to missing prerequisites")
endif ()
-set(libmagic_req 5.04)
-if ( LibMagic_VERSION VERSION_LESS ${libmagic_req} )
-message(FATAL_ERROR "libmagic of at least version ${libmagic_req} required "
-"(found ${LibMagic_VERSION})")
-endif ()
include_directories(BEFORE
${PCAP_INCLUDE_DIR}
${OpenSSL_INCLUDE_DIR}
${BIND_INCLUDE_DIR}
${BinPAC_INCLUDE_DIR}
-${LibMagic_INCLUDE_DIR}
${ZLIB_INCLUDE_DIR}
+${JEMALLOC_INCLUDE_DIR}
)
# Optional Dependencies
@@ -153,9 +144,10 @@ if (NOT DISABLE_ELASTICSEARCH AND LIBCURL_FOUND)
list(APPEND OPTLIBS ${LibCURL_LIBRARIES})
endif()
-if (ENABLE_PERFTOOLS_DEBUG)
+if (ENABLE_PERFTOOLS_DEBUG OR ENABLE_PERFTOOLS)
# Just a no op to prevent CMake from complaining about manually-specified
-# ENABLE_PERFTOOLS_DEBUG not being used if google perftools weren't found
+# ENABLE_PERFTOOLS_DEBUG or ENABLE_PERFTOOLS not being used if google
+# perftools weren't found
endif ()
set(brodeps
@@ -163,8 +155,8 @@ set(brodeps
${PCAP_LIBRARY}
${OpenSSL_LIBRARIES}
${BIND_LIBRARY}
-${LibMagic_LIBRARY}
${ZLIB_LIBRARY}
+${JEMALLOC_LIBRARIES}
${OPTLIBS}
)
@@ -201,11 +193,6 @@ CheckOptionalBuildSources(aux/broctl Broctl INSTALL_BROCTL)
CheckOptionalBuildSources(aux/bro-aux Bro-Aux INSTALL_AUX_TOOLS)
CheckOptionalBuildSources(aux/broccoli Broccoli INSTALL_BROCCOLI)
-install(DIRECTORY ./magic/ DESTINATION ${BRO_MAGIC_INSTALL_PATH} FILES_MATCHING
-PATTERN "COPYING" EXCLUDE
-PATTERN "*"
-)
########################################################################
## Packaging Setup
@@ -250,6 +237,7 @@ message(
"\ngperftools found: ${HAVE_PERFTOOLS}"
"\n tcmalloc: ${USE_PERFTOOLS_TCMALLOC}"
"\n debugging: ${USE_PERFTOOLS_DEBUG}"
+"\njemalloc: ${ENABLE_JEMALLOC}"
"\ncURL: ${USE_CURL}"
"\n"
"\nDataSeries: ${USE_DATASERIES}"
COPYING

@@ -1,4 +1,4 @@
-Copyright (c) 1995-2012, The Regents of the University of California
+Copyright (c) 1995-2013, The Regents of the University of California
through the Lawrence Berkeley National Laboratory and the
International Computer Science Institute. All rights reserved.

INSTALL (313 lines changed)

@@ -1,314 +1,3 @@
.. _CMake: http://www.cmake.org
.. _SWIG: http://www.swig.org
.. _Xcode: https://developer.apple.com/xcode/
.. _MacPorts: http://www.macports.org
.. _Fink: http://www.finkproject.org
.. _Homebrew: http://mxcl.github.com/homebrew
.. _bro downloads page: http://bro.org/download/index.html
==============
Installing Bro
==============
+See doc/install/install.rst for installation instructions.
Bro can be downloaded in either pre-built binary package or
source code forms.
Prerequisites
=============
Bro requires the following libraries and tools to be installed
before you begin:
* Libpcap http://www.tcpdump.org
* OpenSSL libraries http://www.openssl.org
* BIND8 library
* Libmagic
* Libz
* Bash (for BroControl)
To build Bro from source, the following additional dependencies are required:
* CMake 2.6.3 or greater http://www.cmake.org
* SWIG http://www.swig.org
* Bison (GNU Parser Generator)
* Flex (Fast Lexical Analyzer)
* Libpcap headers http://www.tcpdump.org
* OpenSSL headers http://www.openssl.org
* libmagic headers
* zlib headers
* Perl
Bro can make use of some optional libraries and tools if they are found at
build time:
* LibGeoIP (for geo-locating IP addresses)
* gperftools (tcmalloc is used to improve memory and CPU usage)
* sendmail (for BroControl)
* ipsumdump (for trace-summary) http://www.cs.ucla.edu/~kohler/ipsumdump
* Ruby executable, library, and headers (for Broccoli Ruby bindings)
Installing From Pre-Built Binary Release Packages
=================================================
See the `bro downloads page`_ for currently supported/targeted platforms.
* RPM
.. console::
sudo yum localinstall Bro-*.rpm
* DEB
.. console::
sudo gdebi Bro-*.deb
* MacOS Disk Image with Installer
Just open the ``Bro-*.dmg`` and then run the ``.pkg`` installer.
Everything installed by the package will go into ``/opt/bro``.
The primary install prefix for binary packages is ``/opt/bro``.
Non-MacOS packages that include BroControl also put variable/runtime
data (e.g. Bro logs) in ``/var/opt/bro``.
Installing From Source
======================
Required Dependencies
~~~~~~~~~~~~~~~~~~~~~
The following dependencies are required to build Bro:
* RPM/RedHat-based Linux:
.. console::
sudo yum install cmake make gcc gcc-c++ flex bison libpcap-devel openssl-devel python-devel swig zlib-devel file-devel
* DEB/Debian-based Linux:
.. console::
sudo apt-get install cmake make gcc g++ flex bison libpcap-dev libssl-dev python-dev swig zlib1g-dev libmagic-dev
* FreeBSD
Most required dependencies should come with a minimal FreeBSD install
except for the following.
.. console::
sudo pkg_add -r bash cmake swig bison python
Note that ``bash`` needs to be in ``PATH``, which by default it is
not. The FreeBSD package installs the binary into
``/usr/local/bin``.
* Mac OS X
Compiling source code on Macs requires first downloading Xcode_,
then going through its "Preferences..." -> "Downloads" menus to
install the "Command Line Tools" component.
Lion (10.7) and Mountain Lion (10.8) come with all required
dependencies except for CMake_, SWIG_, and ``libmagic``.
Distributions of these dependencies can likely be obtained from your
preferred Mac OS X package management system (e.g. MacPorts_, Fink_,
or Homebrew_).
Specifically for MacPorts, the ``swig``, ``swig-ruby``, ``swig-python``
and ``file`` packages provide the required dependencies.
Optional Dependencies
~~~~~~~~~~~~~~~~~~~~~
Bro can use libGeoIP for geo-locating IP addresses, and sendmail for
sending emails.
* RedHat Enterprise Linux:
.. console::
sudo yum install geoip-devel sendmail
* CentOS Linux:
.. console::
sudo yum install GeoIP-devel sendmail
* DEB/Debian-based Linux:
.. console::
sudo apt-get install libgeoip-dev sendmail
* Ports-based FreeBSD
.. console::
sudo pkg_add -r GeoIP
sendmail is typically already available.
* Mac OS X
Vanilla OS X installations don't ship with libGeoIP, but
if installed from your preferred package management system (e.g. MacPorts,
Fink, or Homebrew), they should be automatically detected and Bro will
compile against them.
Additional steps may be needed to :doc:`get the right GeoIP database <geoip>`.
Compiling Bro Source Code
~~~~~~~~~~~~~~~~~~~~~~~~~
Bro releases are bundled into source packages for convenience and
available from the `bro downloads page`_.
Alternatively, the latest Bro development version can be obtained through git
repositories hosted at `git.bro.org <http://git.bro.org>`_. See
our `git development documentation
<http://bro.org/development/process.html>`_ for comprehensive
information on Bro's use of git revision control, but the short story
for downloading the full source code experience for Bro via git is:
.. console::
git clone --recursive git://git.bro.org/bro
.. note:: If you choose to clone the ``bro`` repository non-recursively for
a "minimal Bro experience", be aware that compiling it depends on
BinPAC, which has its own ``binpac`` repository. Either install it
first or initialize/update the cloned ``bro`` repository's
``aux/binpac`` submodule.
The typical way to build and install from source is (for more options,
run ``./configure --help``):
.. console::
./configure
make
make install
The default installation path is ``/usr/local/bro``, which would typically
require root privileges when doing the ``make install``. A different
installation path can be chosen by specifying the ``--prefix`` option.
Note that ``/usr`` and ``/opt/bro`` are the
standard prefixes for binary Bro packages to be installed, so those are
typically not good choices unless you are creating such a package.
Depending on the Bro package you downloaded, there may be auxiliary
tools and libraries available in the ``aux/`` directory. Some of them
will be automatically built and installed along with Bro. There are
``--disable-*`` options that can be given to the configure script to
turn off unwanted auxiliary projects that would otherwise be installed
automatically. Finally, use ``make install-aux`` to install some of
the other programs that are in the ``aux/bro-aux`` directory.
OpenBSD users, please see our FAQ at
http://www.bro.org/documentation/faq.html if you are having
problems installing Bro.
Upgrading From a Previous Version of Bro
========================================
If you're doing an upgrade install (rather than a fresh install),
there's two suggested approaches: either install Bro using the same
installation prefix directory as before, or pick a new prefix and copy
local customizations over.
Re-Use Previous Install Prefix
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
If you choose to configure and install Bro with the same prefix
directory as before, local customization and configuration to files in
``$prefix/share/bro/site`` and ``$prefix/etc`` won't be overwritten
(``$prefix`` indicating the root of where Bro was installed). Also, logs
generated at run-time won't be touched by the upgrade. (But making
a backup of local changes before upgrading is still recommended.)
After upgrading, remember to check ``$prefix/share/bro/site`` and
``$prefix/etc`` for ``.example`` files, which indicate the
distribution's version of the file differs from the local one, which may
include local changes. Review the differences, and make adjustments
as necessary (for differences that aren't the result of a local change,
use the new version's).
Pick a New Install prefix
~~~~~~~~~~~~~~~~~~~~~~~~~
If you want to install the newer version in a different prefix
directory than before, you can just copy local customization and
configuration files from ``$prefix/share/bro/site`` and ``$prefix/etc``
to the new location (``$prefix`` indicating the root of where Bro was
originally installed). Make sure to review the files for difference
before copying and make adjustments as necessary (for differences that
aren't the result of a local change, use the new version's). Of
particular note, the copied version of ``$prefix/etc/broctl.cfg`` is
likely to need changes to the ``SpoolDir`` and ``LogDir`` settings.
Configure the Run-Time Environment
==================================
Just remember that you may need to adjust your ``PATH`` environment variable
according to the platform/shell/package you're using. For example:
Bourne-Shell Syntax:
.. console::
export PATH=/usr/local/bro/bin:$PATH
C-Shell Syntax:
.. console::
setenv PATH /usr/local/bro/bin:$PATH
Or substitute ``/opt/bro/bin`` instead if you installed from a binary package.
Running Bro
===========
Bro is a complex program and it takes a bit of time to get familiar
with it. A good place for newcomers to start is the Quick Start Guide
at http://www.bro.org/documentation/quickstart.html.
For developers that wish to run Bro directly from the ``build/``
directory (i.e., without performing ``make install``), they will have
to first adjust ``BROPATH`` to look for scripts inside the build
directory. Sourcing either ``build/bro-path-dev.sh`` or
``build/bro-path-dev.csh`` as appropriate for the current shell
accomplishes this and also augments your ``PATH`` so you can use the
Bro binary directly::
./configure
make
source build/bro-path-dev.sh
bro <options>

Makefile

@@ -6,7 +6,7 @@
#
BUILD=build
-REPO=`basename \`git config --get remote.origin.url\``
+REPO=`basename \`git config --get remote.origin.url | sed 's/^[^:]*://g'\``
VERSION_FULL=$(REPO)-`cat VERSION`
VERSION_MIN=$(REPO)-`cat VERSION`-minimal
HAVE_MODULES=git submodule | grep -v cmake >/dev/null
@@ -29,28 +29,17 @@ doc: configured
docclean: configured
$(MAKE) -C $(BUILD) $@
-restdoc: configured
-$(MAKE) -C $(BUILD) $@
-restclean: configured
-$(MAKE) -C $(BUILD) $@
-broxygen: configured
-$(MAKE) -C $(BUILD) $@
-broxygenclean: configured
-$(MAKE) -C $(BUILD) $@
dist:
@rm -rf $(VERSION_FULL) $(VERSION_FULL).tgz
@rm -rf $(VERSION_MIN) $(VERSION_MIN).tgz
-@mkdir $(VERSION_FULL)
-@tar --exclude=$(VERSION_FULL)* --exclude=$(VERSION_MIN)* --exclude=.git -cf - . | ( cd $(VERSION_FULL) && tar -xpf - )
-@( cd $(VERSION_FULL) && cp -R ../.git . && git reset -q --hard HEAD && git clean -xdfq && rm -rf .git )
+@git clone --recursive . $(VERSION_FULL) >/dev/null 2>&1
+@find $(VERSION_FULL) -name .git\* | xargs rm -rf
@tar -czf $(VERSION_FULL).tgz $(VERSION_FULL) && echo Package: $(VERSION_FULL).tgz && rm -rf $(VERSION_FULL)
-@$(HAVE_MODULES) && mkdir $(VERSION_MIN) || exit 0
-@$(HAVE_MODULES) && tar --exclude=$(VERSION_FULL)* --exclude=$(VERSION_MIN)* --exclude=.git `git submodule | awk '{print "--exclude="$$2}' | grep -v cmake | tr '\n' ' '` -cf - . | ( cd $(VERSION_MIN) && tar -xpf - ) || exit 0
-@$(HAVE_MODULES) && ( cd $(VERSION_MIN) && cp -R ../.git . && git reset -q --hard HEAD && git clean -xdfq && rm -rf .git ) || exit 0
+@$(HAVE_MODULES) && git clone . $(VERSION_MIN) >/dev/null 2>&1 || exit 0
+@$(HAVE_MODULES) && (cd $(VERSION_MIN) && git submodule update --init cmake >/dev/null 2>&1) || exit 0
+@$(HAVE_MODULES) && (cd $(VERSION_MIN) && git submodule update --init src/3rdparty >/dev/null 2>&1) || exit 0
+@$(HAVE_MODULES) && (cd $(VERSION_MIN) && git submodule update --init magic >/dev/null 2>&1) || exit 0
+@$(HAVE_MODULES) && find $(VERSION_MIN) -name .git\* | xargs rm -rf || exit 0
@$(HAVE_MODULES) && tar -czf $(VERSION_MIN).tgz $(VERSION_MIN) && echo Package: $(VERSION_MIN).tgz && rm -rf $(VERSION_MIN) || exit 0
bindist:
@@ -65,6 +54,7 @@ test:
test-all: test
test -d aux/broctl && ( cd aux/broctl && make test )
+test -d aux/btest && ( cd aux/btest && make test )
configured:
@test -d $(BUILD) || ( echo "Error: No build/ directory found. Did you run configure?" && exit 1 )
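The reworked dist target stages the tarball from a pristine local git clone instead of a tar copy of the working tree. Run by hand, the full-package path amounts to roughly the following (a sketch; the version string is only illustrative):

    # Roughly what `make dist` now does for the full source package.
    git clone --recursive . bro-2.3-beta-18
    find bro-2.3-beta-18 -name .git\* | xargs rm -rf
    tar -czf bro-2.3-beta-18.tgz bro-2.3-beta-18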

NEWS (901 lines changed)

File diff suppressed because it is too large.

README (10 lines changed)

@@ -8,11 +8,21 @@ and pointers for getting started. NEWS contains release notes for the
current version, and CHANGES has the complete history of changes.
Please see COPYING for licensing information.
+You can download source and binary releases on:
+http://www.bro.org/download
+To get the current development version, clone our master git
+repository:
+git clone --recursive git://git.bro.org/bro
For more documentation, research publications, and community contact
information, please see Bro's home page:
http://www.bro.org
On behalf of the Bro Development Team,
Vern Paxson & Robin Sommer,

VERSION

@@ -1 +1 @@
-2.1-888
+2.3-beta-18

@@ -1 +1 @@
-Subproject commit 896ddedde55c48ec2163577fc258b49c418abb3e
+Subproject commit ec1e052afd5a8cd3d1d2cbb28fcd688018e379a5

@@ -1 +1 @@
-Subproject commit a9942558c7d3dfd80148b8aaded64c82ade3d117
+Subproject commit 5721df4f5f6fa84de6257cca6582a28e45831786

@@ -1 +1 @@
-Subproject commit 889f9c65944ceac20ad9230efc39d33e6e1221c3
+Subproject commit c2f5dd2cb7876158fdf9721aebd22567db840db1

@@ -1 +1 @@
-Subproject commit 0cd102805e73343cab3f9fd4a76552e13940dad9
+Subproject commit 1e55bff2df49fe7dc3bd54e21050b39530eeb714

@@ -1 +1 @@
-Subproject commit ce366206e3407e534a786ad572c342e9f9fef26b
+Subproject commit 4da1bd24038d4977e655f2b210f34e37f0b73b78

bro-path-dev.in

@@ -10,10 +10,4 @@
# BROPATH=`./bro-path-dev` ./src/bro
#
-broPolicies=${BRO_SCRIPT_SOURCE_PATH}:${BRO_SCRIPT_SOURCE_PATH}/policy:${BRO_SCRIPT_SOURCE_PATH}/site
-broGenPolicies=${CMAKE_BINARY_DIR}/scripts
-installedPolicies=${BRO_SCRIPT_INSTALL_PATH}:${BRO_SCRIPT_INSTALL_PATH}/site
-echo .:$broPolicies:$broGenPolicies
+echo .:${CMAKE_SOURCE_DIR}/scripts:${CMAKE_SOURCE_DIR}/scripts/policy:${CMAKE_SOURCE_DIR}/scripts/site:${CMAKE_BINARY_DIR}/scripts

cmake (2 lines changed)

@@ -1 +1 @@
-Subproject commit 026639f8368e56742c0cb5d9fb390ea64e60ec50
+Subproject commit 0f301aa08a970150195a2ea5b3ed43d2d98b35b3

configure (19 lines changed)

@@ -32,12 +32,13 @@ Usage: $0 [OPTION]... [VAR=VALUE]...
--enable-perftools force use of Google perftools on non-Linux systems
(automatically on when perftools is present on Linux)
--enable-perftools-debug use Google's perftools for debugging
+--enable-jemalloc link against jemalloc
+--enable-ruby build ruby bindings for broccoli (deprecated)
--disable-broccoli don't build or install the Broccoli library
--disable-broctl don't install Broctl
--disable-auxtools don't build or install auxiliary tools
--disable-perftools don't try to build with Google Perftools
--disable-python don't try to build python bindings for broccoli
---disable-ruby don't try to build ruby bindings for broccoli
--disable-dataseries don't use the optional DataSeries log writer
--disable-elasticsearch don't use the optional ElasticSearch log writer
@@ -49,11 +50,12 @@ Usage: $0 [OPTION]... [VAR=VALUE]...
--with-flex=PATH path to flex executable
--with-bison=PATH path to bison executable
--with-perl=PATH path to perl executable
---with-libmagic=PATH path to libmagic install root
Optional Packages in Non-Standard Locations:
+--with-libmagic=PATH path to libmagic install root
--with-geoip=PATH path to the libGeoIP install root
--with-perftools=PATH path to Google Perftools install root
+--with-jemalloc=PATH path to jemalloc install root
--with-python=PATH path to Python interpreter
--with-python-lib=PATH path to libpython
--with-python-inc=PATH path to Python headers
@@ -105,6 +107,7 @@ append_cache_entry BRO_ETC_INSTALL_DIR PATH $prefix/etc
append_cache_entry ENABLE_DEBUG BOOL false
append_cache_entry ENABLE_PERFTOOLS BOOL false
append_cache_entry ENABLE_PERFTOOLS_DEBUG BOOL false
+append_cache_entry ENABLE_JEMALLOC BOOL false
append_cache_entry BinPAC_SKIP_INSTALL BOOL true
append_cache_entry BUILD_SHARED_LIBS BOOL true
append_cache_entry INSTALL_AUX_TOOLS BOOL true
@@ -113,6 +116,7 @@ append_cache_entry INSTALL_BROCTL BOOL true
append_cache_entry CPACK_SOURCE_IGNORE_FILES STRING
append_cache_entry ENABLE_MOBILE_IPV6 BOOL false
append_cache_entry DISABLE_PERFTOOLS BOOL false
+append_cache_entry DISABLE_RUBY_BINDINGS BOOL true
# parse arguments
while [ $# -ne 0 ]; do
@@ -159,6 +163,9 @@ while [ $# -ne 0 ]; do
append_cache_entry ENABLE_PERFTOOLS BOOL true
append_cache_entry ENABLE_PERFTOOLS_DEBUG BOOL true
;;
+--enable-jemalloc)
+append_cache_entry ENABLE_JEMALLOC BOOL true
+;;
--disable-broccoli)
append_cache_entry INSTALL_BROCCOLI BOOL false
;;
@@ -174,8 +181,8 @@ while [ $# -ne 0 ]; do
--disable-python)
append_cache_entry DISABLE_PYTHON_BINDINGS BOOL true
;;
---disable-ruby)
+--enable-ruby)
-append_cache_entry DISABLE_RUBY_BINDINGS BOOL true
+append_cache_entry DISABLE_RUBY_BINDINGS BOOL false
;;
--disable-dataseries)
append_cache_entry DISABLE_DATASERIES BOOL true
@@ -213,6 +220,10 @@ while [ $# -ne 0 ]; do
--with-perftools=*)
append_cache_entry GooglePerftools_ROOT_DIR PATH $optarg
;;
+--with-jemalloc=*)
+append_cache_entry JEMALLOC_ROOT_DIR PATH $optarg
+append_cache_entry ENABLE_JEMALLOC BOOL true
+;;
--with-python=*)
append_cache_entry PYTHON_EXECUTABLE PATH $optarg
;;
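The new switches simply forward jemalloc settings to CMake. A typical invocation on a system with jemalloc under a non-standard prefix might look like this (the prefix shown is only an example):

    # --with-jemalloc implies --enable-jemalloc, per the case arm above.
    ./configure --enable-jemalloc --with-jemalloc=/usr/local
    make && make install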

doc/CHANGES (symlink deleted)

@@ -1 +0,0 @@
-../CHANGES

doc/CMakeLists.txt

@@ -1,75 +1,83 @@
-set(BIF_SRC_DIR ${PROJECT_SOURCE_DIR}/src)
-set(RST_OUTPUT_DIR ${CMAKE_CURRENT_BINARY_DIR}/rest_output)
-set(DOC_OUTPUT_DIR ${CMAKE_CURRENT_BINARY_DIR}/out)
-set(DOC_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR})
-set(DOC_SOURCE_WORKDIR ${CMAKE_CURRENT_BINARY_DIR}/sphinx-sources)
-set(MASTER_POLICY_INDEX ${CMAKE_CURRENT_BINARY_DIR}/scripts/policy_index)
-set(MASTER_PACKAGE_INDEX ${CMAKE_CURRENT_BINARY_DIR}/scripts/pkg_index)
-file(GLOB_RECURSE DOC_SOURCES FOLLOW_SYMLINKS "*")
-# configure the Sphinx config file (expand variables CMake might know about)
+set(BROCCOLI_DOCS_SRC ${CMAKE_BINARY_DIR}/aux/broccoli/doc/html)
+set(BROCCOLI_DOCS_DST ${CMAKE_BINARY_DIR}/html/broccoli-api)
+set(SPHINX_INPUT_DIR ${CMAKE_CURRENT_BINARY_DIR}/sphinx_input)
+set(SPHINX_OUTPUT_DIR ${CMAKE_CURRENT_BINARY_DIR}/sphinx_output)
+set(BROXYGEN_SCRIPT_OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/broxygen_script_output)
+set(BROXYGEN_CACHE_DIR ${CMAKE_CURRENT_BINARY_DIR}/broxygen_cache)
+# Find out what BROPATH to use when executing bro.
+execute_process(COMMAND ${CMAKE_BINARY_DIR}/bro-path-dev
+OUTPUT_VARIABLE BROPATH
+RESULT_VARIABLE retval
+OUTPUT_STRIP_TRAILING_WHITESPACE)
+if (NOT ${retval} EQUAL 0)
+message(FATAL_ERROR "Problem setting BROPATH")
+endif ()
+# Configure the Sphinx config file (expand variables CMake might know about).
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/conf.py.in
${CMAKE_CURRENT_BINARY_DIR}/conf.py
@ONLY)
-add_subdirectory(scripts)
-# The "broxygen" target generates reST documentation for any outdated bro
-# scripts and then uses Sphinx to generate HTML documentation from the reST
-add_custom_target(broxygen
-# copy the template documentation to the build directory
-# to give as input for sphinx
-COMMAND "${CMAKE_COMMAND}" -E copy_directory
-${DOC_SOURCE_DIR}
-${DOC_SOURCE_WORKDIR}
-# copy generated policy script documentation into the
-# working copy of the template documentation
-COMMAND "${CMAKE_COMMAND}" -E copy_directory
-${RST_OUTPUT_DIR}
-${DOC_SOURCE_WORKDIR}/scripts
-# append to the master index of all policy scripts
-COMMAND cat ${MASTER_POLICY_INDEX} >>
-${DOC_SOURCE_WORKDIR}/scripts/index.rst
-# append to the master index of all policy packages
-COMMAND cat ${MASTER_PACKAGE_INDEX} >>
-${DOC_SOURCE_WORKDIR}/scripts/packages.rst
-# construct a reST file for each group
-COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/bin/group_index_generator.py
-${CMAKE_CURRENT_BINARY_DIR}/scripts/group_list
-${CMAKE_CURRENT_BINARY_DIR}/scripts
-${DOC_SOURCE_WORKDIR}/scripts
-# tell sphinx to generate html
-COMMAND sphinx-build
--b html
--c ${CMAKE_CURRENT_BINARY_DIR}
--d ${DOC_OUTPUT_DIR}/doctrees
-${DOC_SOURCE_WORKDIR}
-${DOC_OUTPUT_DIR}/html
-# create symlink to the html output directory for convenience
-COMMAND "${CMAKE_COMMAND}" -E create_symlink
-${DOC_OUTPUT_DIR}/html
-${CMAKE_BINARY_DIR}/html
-# copy Broccoli API reference into output dir if it exists
-COMMAND test -d ${CMAKE_BINARY_DIR}/aux/broccoli/doc/html && ( rm -rf ${CMAKE_BINARY_DIR}/html/broccoli-api && cp -r ${CMAKE_BINARY_DIR}/aux/broccoli/doc/html ${CMAKE_BINARY_DIR}/html/broccoli-api ) || true
-WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
-COMMENT "[Sphinx] Generating HTML policy script docs"
-# SOURCES just adds stuff to IDE projects as a convenience
-SOURCES ${DOC_SOURCES})
-# The "sphinxclean" target removes just the Sphinx input/output directories
-# from the build directory.
-add_custom_target(broxygenclean
-COMMAND "${CMAKE_COMMAND}" -E remove_directory
-${DOC_SOURCE_WORKDIR}
-COMMAND "${CMAKE_COMMAND}" -E remove_directory
-${DOC_OUTPUT_DIR}
-VERBATIM)
-add_dependencies(broxygen broxygenclean restdoc)
+configure_file(${CMAKE_CURRENT_SOURCE_DIR}/broxygen.conf.in
+${CMAKE_CURRENT_BINARY_DIR}/broxygen.conf
+@ONLY)
+add_custom_target(sphinxdoc
+# Copy the template documentation to build directory to use as input tree
+# for Sphinx. This is needed because some parts are dynamically generated
+# in to that tree by Bro/Broxygen.
+COMMAND rsync -q -r --copy-links --times --delete
+--filter='protect scripts/*'
+${CMAKE_CURRENT_SOURCE_DIR}/ ${SPHINX_INPUT_DIR}
+# Use Bro/Broxygen to dynamically generate reST for all Bro scripts.
+COMMAND BROPATH=${BROPATH}
+${CMAKE_BINARY_DIR}/src/bro
+-X ${CMAKE_CURRENT_BINARY_DIR}/broxygen.conf
+broxygen >/dev/null
+# Rsync over the generated reST to the Sphinx source tree in the build dir.
+COMMAND rsync -q -r --copy-links --times --delete --filter='protect *.bro'
+${BROXYGEN_SCRIPT_OUTPUT}/ ${SPHINX_INPUT_DIR}/scripts
+# Rsync over Bro scripts to the Sphinx source tree in the build dir.
+# These are used by :download: references in the generated script docs.
+COMMAND rsync -q -r --copy-links --times --delete
+--filter='protect /base/bif/*' --filter='protect *.rst'
+--filter='include */' --filter='include *.bro' --filter='exclude *'
+${CMAKE_SOURCE_DIR}/scripts/ ${SPHINX_INPUT_DIR}/scripts
+# Rsync over Bro scripts created by BIF compiler to the Sphinx source tree.
+COMMAND rsync -q -r --copy-links --times --delete
+--filter='protect *.rst' --filter='include */'
+--filter='include *.bro' --filter='exclude *'
+${CMAKE_BINARY_DIR}/scripts/base/bif/
+${SPHINX_INPUT_DIR}/scripts/base/bif
+# Use Sphinx to build HTML.
+COMMAND sphinx-build
+-b html
+-c ${CMAKE_CURRENT_BINARY_DIR}
+-d ${SPHINX_OUTPUT_DIR}/doctrees
+${SPHINX_INPUT_DIR}
+${SPHINX_OUTPUT_DIR}/html
+# Create symlink to the html output directory for convenience.
+COMMAND "${CMAKE_COMMAND}" -E create_symlink
+${SPHINX_OUTPUT_DIR}/html
+${CMAKE_BINARY_DIR}/html
+# Copy Broccoli API reference into output dir if it exists.
+COMMAND test -d ${BROCCOLI_DOCS_SRC} &&
+( rm -rf ${BROCCOLI_DOCS_DST} &&
+cp -r ${BROCCOLI_DOCS_SRC} ${BROCCOLI_DOCS_DST} ) || true
+WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
+COMMENT "[Sphinx] Generate HTML documentation in ${CMAKE_BINARY_DIR}/html")
+add_dependencies(sphinxdoc bro)
+add_custom_target(sphinxdoc_clean
+COMMAND "${CMAKE_COMMAND}" -E remove_directory ${SPHINX_INPUT_DIR}
+COMMAND "${CMAKE_COMMAND}" -E remove_directory ${SPHINX_OUTPUT_DIR}
+COMMAND "${CMAKE_COMMAND}" -E remove_directory ${BROXYGEN_SCRIPT_OUTPUT}
+COMMAND "${CMAKE_COMMAND}" -E remove_directory ${BROXYGEN_CACHE_DIR}
+VERBATIM)
add_custom_target(doc)
add_custom_target(docclean)
-add_dependencies(doc broxygen)
-add_dependencies(docclean broxygenclean restclean)
+add_dependencies(doc sphinxdoc)
+add_dependencies(docclean sphinxdoc_clean)
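With the old restdoc/broxygen targets gone, documentation now builds through the single sphinxdoc target. From a configured tree the flow is roughly the following (a sketch, assuming Sphinx is installed):

    ./configure
    make            # builds bro, which the sphinxdoc target depends on
    make doc        # forwards to sphinxdoc; the HTML is symlinked at build/html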

doc/INSTALL (symlink deleted)

@@ -1 +0,0 @@
-../INSTALL

doc/LICENSE (new file, 5 lines)

@@ -0,0 +1,5 @@
+This work is licensed under the Creative Commons
+Attribution-NonCommercial 3.0 Unported License. To view a copy of this
+license, visit http://creativecommons.org/licenses/by-nc/3.0/ or send
+a letter to Creative Commons, 444 Castro Street, Suite 900, Mountain
+View, California, 94041, USA.

doc/README

@@ -10,22 +10,22 @@ common/general documentation, style sheets, JavaScript, etc. The Sphinx
config file is produced from ``conf.py.in``, and can be edited to change
various Sphinx options.
-There is also a custom Sphinx domain implemented in ``source/ext/bro.py``
+There is also a custom Sphinx domain implemented in ``ext/bro.py``
which adds some reST directives and roles that aid in generating useful
index entries and cross-references. Other extensions can be added in
a similar fashion.
-Either the ``make doc`` or ``make broxygen`` targets in the top-level
-Makefile can be used to locally render the reST files into HTML.
-Those targets depend on:
+The ``make doc`` target in the top-level Makefile can be used to locally
+render the reST files into HTML. That target depends on:
* Python interpreter >= 2.5
-* `Sphinx <http://sphinx.pocoo.org/>`_ >= 1.0.1
+* `Sphinx <http://sphinx-doc.org/>`_ >= 1.0.1
+* Doxygen (required only for building the Broccoli API doc)
After completion, HTML documentation is symlinked in ``build/html``.
-There's also ``make docclean`` and ``make broxygenclean`` targets to
-clean the resulting documentation.
+There's also a ``make docclean`` target which deletes any files
+created during the documentation build process.
Notes for Writing Documentation
-------------------------------

@@ -439,8 +439,17 @@ td.linenos pre {
color: #aaa;
}
+.highlight-guess {
+overflow:auto;
+}
+.highlight-none {
+overflow:auto;
+}
table.highlighttable {
margin-left: 0.5em;
+overflow:scroll;
}
table.highlighttable td {

@@ -150,8 +150,14 @@ sup, sub {
line-height:0;
}
-pre {
-white-space:pre;
+pre, code {
+white-space: pre;
+overflow: auto;
+margin-left: 2em;
+margin-right: 2em;
+margin-top: .5em;
+margin-bottom: 1.5em;
+word-wrap: normal;
}
pre, code, tt {
@@ -166,6 +172,10 @@ dl dt {
font-weight: bold;
}
+li dl dt {
+font-weight: normal;
+}
dd {
margin:0 0 20px 20px;
}
@@ -174,6 +184,16 @@ small {
font-size:75%;
}
+.small-text {
+font-size:75%;
+}
+.align-center {
+display: block;
+margin-left: auto;
+margin-right: auto;
+}
a:link,
a:visited,
a:active
@@ -434,4 +454,32 @@ li {
padding-bottom: 0em;
margin-bottom: .5em;
margin-top: 0em;
+}
+.btest-cmd .hll {
+font-weight: bold;
+background: #FFFAE2;
+}
+.btest-include .hll {
+display: block;
+text-align: center;
+font-family: Palatino;
+background: #FFFAE2;
+}
+.btest-include .hll * {
+color: #aaa;
+}
+.linenodiv pre {
+margin-left: 0px;
+margin-right: 0px;
+width: 1.5em;
+text-align: right;
+background: #000;
+}
+.btest-cmd .code pre, .btest-include .code pre {
+margin-left: 0px;
}

doc/bin/group_index_generator.py (deleted)

@@ -1,62 +0,0 @@
#! /usr/bin/env python
# This script automatically generates a reST documents that lists
# a collection of Bro scripts that are "grouped" together.
# The summary text (##! comments) of the script is embedded in the list
#
# 1st argument is the file containing list of groups
# 2nd argument is the directory containing ${group}_files lists of
# scripts that belong to the group and ${group}_doc_names lists of
# document names that can be supplied to a reST :doc: role
# 3rd argument is a directory in which write a ${group}.rst file (will
# append to existing file) that contains reST style references to
# script docs along with summary text contained in original script
import sys
import os
import string
group_list = sys.argv[1]
file_manifest_dir = sys.argv[2]
output_dir = sys.argv[3]
with open(group_list, 'r') as f_group_list:
for group in f_group_list.read().splitlines():
#print group
file_manifest = os.path.join(file_manifest_dir, group + "_files")
doc_manifest = os.path.join(file_manifest_dir, group + "_doc_names")
src_files = []
doc_names = []
with open(file_manifest, 'r') as f_file_manifest:
src_files = f_file_manifest.read().splitlines()
with open(doc_manifest, 'r') as f_doc_manifest:
doc_names = f_doc_manifest.read().splitlines()
for i in range(len(src_files)):
src_file = src_files[i]
#print "\t" + src_file
summary_comments = []
with open(src_file, 'r') as f_src_file:
for line in f_src_file:
sum_pos = string.find(line, "##!")
if sum_pos != -1:
summary_comments.append(line[(sum_pos+3):])
#print summary_comments
group_file = os.path.join(output_dir, group + ".rst")
if not os.path.exists(group_file):
if not os.path.exists(os.path.dirname(group_file)):
os.makedirs(os.path.dirname(group_file))
with open(group_file, 'w') as f_group_file:
f_group_file.write(":orphan:\n\n")
title = "Package Index: %s\n" % os.path.dirname(group)
f_group_file.write(title);
for n in range(len(title)):
f_group_file.write("=")
f_group_file.write("\n");
with open(group_file, 'a') as f_group_file:
f_group_file.write("\n:doc:`/scripts/%s`\n" % doc_names[i])
for line in summary_comments:
f_group_file.write(" " + line)

doc/broids/index.rst (new file, 79 lines)

@@ -0,0 +1,79 @@
.. _bro-ids:
=======
Bro IDS
=======
An Intrusion Detection System (IDS) allows you to detect suspicious
activities happening on your network as a result of a past or active
attack. Because of its programming capabilities, Bro can easily be
configured to behave like traditional IDSs and detect common attacks
with well known patterns, or you can create your own scripts to detect
conditions specific to your particular case.
In the following sections, we present a few examples of common uses of
Bro as an IDS.
-------------------------------------------------
Detecting an FTP Brute-force Attack and Notifying
-------------------------------------------------
For the purpose of this exercise, we define FTP brute-forcing as too many
rejected usernames and passwords occurring from a single address. We
start by defining a threshold for the number of attempts, a monitoring
interval (in minutes), and a new notice type.
.. btest-include:: ${BRO_SRC_ROOT}/scripts/policy/protocols/ftp/detect-bruteforcing.bro
:lines: 9-25
Using the ftp_reply event, we check for error codes from the `500
series <http://en.wikipedia.org/wiki/List_of_FTP_server_return_codes>`_
for the "USER" and "PASS" commands, representing rejected usernames or
passwords. For this, we can use the :bro:see:`FTP::parse_ftp_reply_code`
function to break down the reply code and check if the first digit is a
"5" or not. If true, we then use the :ref:`Summary Statistics Framework
<sumstats-framework>` to keep track of the number of failed attempts.
.. btest-include:: ${BRO_SRC_ROOT}/scripts/policy/protocols/ftp/detect-bruteforcing.bro
:lines: 52-60
Next, we use the SumStats framework to raise a notice of the attack when
the number of failed attempts exceeds the specified threshold during the
measuring interval.
.. btest-include:: ${BRO_SRC_ROOT}/scripts/policy/protocols/ftp/detect-bruteforcing.bro
:lines: 28-50
Below is the final code for our script.
.. btest-include:: ${BRO_SRC_ROOT}/scripts/policy/protocols/ftp/detect-bruteforcing.bro
.. btest:: ftp-bruteforce
@TEST-EXEC: btest-rst-cmd bro -r ${TRACES}/ftp/bruteforce.pcap protocols/ftp/detect-bruteforcing.bro
@TEST-EXEC: btest-rst-include notice.log
As a final note, the :doc:`detect-bruteforcing.bro
</scripts/policy/protocols/ftp/detect-bruteforcing.bro>` script above is
included with Bro out of the box. Use this feature by loading this script
during startup.
-------------
Other Attacks
-------------
Detecting SQL Injection Attacks
-------------------------------
Checking files against known malware hashes
-------------------------------------------
Files transmitted on your network could either be completely harmless or
contain viruses and other threats. One possible action against this
threat is to compute the hashes of the files and compare them against a
list of known malware hashes. Bro simplifies this task by offering a
:doc:`detect-MHR.bro </scripts/policy/frameworks/files/detect-MHR.bro>`
script that creates and compares hashes against the `Malware Hash
Registry <https://www.team-cymru.org/Services/MHR/>`_ maintained by Team
Cymru. Use this feature by loading this script during startup.
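Outside the btest harness, the same checks can be exercised directly against a capture file; a sketch (the trace name is only a placeholder for whatever pcap you have on hand):

    # Run the shipped detection script over a pcap and inspect the notices it raises.
    bro -r bruteforce.pcap protocols/ftp/detect-bruteforcing.bro
    cat notice.log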

doc/broxygen.conf.in (new file, 1 line)

@@ -0,0 +1 @@
+script * @BROXYGEN_SCRIPT_OUTPUT@/

doc/cluster.rst (deleted)

@@ -1,84 +0,0 @@
Bro Cluster
===========
Intro
------
Bro is not multithreaded, so once the limitations of a single processor core are reached, the only option currently is to spread the workload across many cores or even many physical computers. The cluster deployment scenario for Bro is the current solution to build these larger systems. The accompanying tools and scripts provide the structure to easily manage many Bro processes examining packets and doing correlation activities but acting as a singular, cohesive entity.
Architecture
---------------
The figure below illustrates the main components of a Bro cluster.
.. image:: images/deployment.png
Tap
***
This is a mechanism that splits the packet stream in order to make a copy
available for inspection. Examples include the monitoring port on a switch and
an optical splitter for fiber networks.
Frontend
********
This is a discrete hardware device or on-host technique that will split your traffic into many streams or flows. The Bro binary does not do this job. There are numerous ways to accomplish this task, some of which are described below in `Frontend Options`_.
Manager
*******
This is a Bro process which has two primary jobs. It receives log messages and notices from the rest of the nodes in the cluster using the Bro communications protocol. The result is that you will end up with single logs for each log instead of many discrete logs that you have to later combine in some manner with post processing. The manager also takes the opportunity to de-duplicate notices and it has the ability to do so since its acting as the choke point for notices and how notices might be processed into actions such as emailing, paging, or blocking.
The manager process is started first by BroControl and it only opens its designated port and waits for connections, it doesnt initiate any connections to the rest of the cluster. Once the workers are started and connect to the manager, logs and notices will start arriving to the manager process from the workers.
Proxy
*****
This is a Bro process which manages synchronized state. Variables can be synchronized across connected Bro processes automatically in Bro and proxies will help the workers by alleviating the need for all of the workers to connect directly to each other.
Examples of synchronized state from the scripts that ship with Bro are things such as the full list of “known” hosts and services which are hosts or services which have been detected as performing full TCP handshakes or an analyzed protocol has been found on the connection. If worker A detects host 1.2.3.4 as an active host, it would be beneficial for worker B to know that as well so worker A shares that information as an insertion to a set <link to set documentation would be good here> which travels to the clusters proxy and the proxy then sends that same set insertion to worker B. The result is that worker A and worker B have shared knowledge about host and services that are active on the network being monitored.
The proxy model extends to having multiple proxies as well if necessary for performance reasons, it only adds one additional step for the Bro processes. Each proxy connects to another proxy in a ring and the workers are shared between them as evenly as possible. When a proxy receives some new bit of state, it will share that with its proxy which is then shared around the ring of proxies and down to all of the workers. From a practical standpoint, there are no rules of thumb established yet for the number of proxies necessary for the number of workers they are serving. Best is to start with a single proxy and add more if communication performance problems are found.
Bro processes acting as proxies dont tend to be extremely intense to CPU or memory and users frequently run proxy processes on the same physical host as the manager.
Worker
******
This is the Bro process that sniffs network traffic and does protocol analysis on the reassembled traffic streams. Most of the work of an active cluster takes place on the workers and as such, the workers typically represent the bulk of the Bro processes that are running in a cluster. The fastest memory and CPU core speed you can afford is best here since all of the protocol parsing and most analysis will take place here. There are no particular requirements for the disks in workers since almost all logging is done remotely to the manager and very little is normally written to disk.
The rule of thumb we have followed recently is to allocate approximately 1 core for every 80Mbps of traffic that is being analyzed, however this estimate could be extremely traffic mix specific. It has generally worked for mixed traffic with many users and servers. For example, if your traffic peaks around 2Gbps (combined) and you want to handle traffic at peak load, you may want to have 26 cores available (2048 / 80 == 25.6). If the 80Mbps estimate works for your traffic, this could be handled by 3 physical hosts dedicated to being workers with each one containing dual 6-core processors.
Once a flow based load balancer is put into place this model is extremely easy to scale as well so its recommended that you guess at the amount of hardware you will need to fully analyze your traffic. If it turns out that you need more, its relatively easy to increase the size of the cluster in most cases.
Frontend Options
----------------
There are many options for setting up a frontend flow distributor and in many cases it may even be beneficial to do multiple stages of flow distribution on the network and on the host.
Discrete hardware flow balancers
********************************
cPacket
^^^^^^^
If you are monitoring one or more 10G physical interfaces, the recommended solution is to use either a cFlow or cVu device from cPacket because they are currently being used very successfully at a number of sites. These devices will perform layer-2 load balancing by rewriting the destination ethernet MAC address to cause each packet associated with a particular flow to have the same destination MAC. The packets can then be passed directly to a monitoring host where each worker has a BPF filter to limit its visibility to only that stream of flows or onward to a commodity switch to split the traffic out to multiple 1G interfaces for the workers. This can ultimately greatly reduce costs since workers can use relatively inexpensive 1G interfaces.
OpenFlow Switches
^^^^^^^^^^^^^^^^^
We are currently exploring the use of OpenFlow based switches to do flow based load balancing directly on the switch which can greatly reduce frontend costs for many users. This document will be updated when we have more information.
On host flow balancing
**********************
PF_RING
^^^^^^^
The PF_RING software for Linux has a “clustering” feature which will do flow based load balancing across a number of processes that are sniffing the same interface. This will allow you to easily take advantage of multiple cores in a single physical host because Bros main event loop is single threaded and cant natively utilize all of the cores. More information about Bro with PF_RING can be found here: (someone want to write a quick Bro/PF_RING tutorial to link to here? document installing kernel module, libpcap wrapper, building Bro with the --with-pcap configure option)
Netmap
^^^^^^
FreeBSD has an in-progress project named Netmap which will enable flow based load balancing as well. When it becomes viable for real world use, this document will be updated.
Click! Software Router
^^^^^^^^^^^^^^^^^^^^^^
Click! can be used for flow based load balancing with a simple configuration. (link to an example for the config). This solution is not recommended on Linux due to Bros PF_RING support and only as a last resort on other operating systems since it causes a lot of overhead due to context switching back and forth between kernel and userland several times per packet.

doc/cluster/index.rst (new file, 172 lines)

@@ -0,0 +1,172 @@
========================
Bro Cluster Architecture
========================
Bro is not multithreaded, so once the limitations of a single processor core
are reached the only option currently is to spread the workload across many
cores, or even many physical computers. The cluster deployment scenario for
Bro is the current solution to build these larger systems. The tools and
scripts that accompany Bro provide the structure to easily manage many Bro
processes examining packets and doing correlation activities but acting as
a singular, cohesive entity. This document describes the Bro cluster
architecture. For information on how to configure a Bro cluster,
see the documentation for
:doc:`BroControl <../components/broctl/README>`.
Architecture
---------------
The figure below illustrates the main components of a Bro cluster.
.. image:: /images/deployment.png
Tap
***
The tap is a mechanism that splits the packet stream in order to make a copy
available for inspection. Examples include the monitoring port on a switch
and an optical splitter on fiber networks.
Frontend
********
The frontend is a discrete hardware device or on-host technique that splits
traffic into many streams or flows. The Bro binary does not do this job.
There are numerous ways to accomplish this task, some of which are described
below in `Frontend Options`_.
Manager
*******
The manager is a Bro process that has two primary jobs. It receives log
messages and notices from the rest of the nodes in the cluster using the Bro
communications protocol. The result is a single log instead of many
discrete logs that you have to combine in some manner with post-processing.
The manager also takes the opportunity to de-duplicate notices, and it has the
ability to do so since it's acting as the choke point for notices and how
notices might be processed into actions (e.g., emailing, paging, or blocking).
The manager process is started first by BroControl and it only opens its
designated port and waits for connections, it doesn't initiate any
connections to the rest of the cluster. Once the workers are started and
connect to the manager, logs and notices will start arriving to the manager
process from the workers.
Proxy
*****
The proxy is a Bro process that manages synchronized state. Variables can
be synchronized across connected Bro processes automatically. Proxies help
the workers by alleviating the need for all of the workers to connect
directly to each other.
Examples of synchronized state from the scripts that ship with Bro include
the full list of "known" hosts and services (which are hosts or services
identified as performing full TCP handshakes) or an analyzed protocol has been
found on the connection. If worker A detects host 1.2.3.4 as an active host,
it would be beneficial for worker B to know that as well. So worker A shares
that information as an insertion to a set which travels to the cluster's
proxy and the proxy sends that same set insertion to worker B. The result
is that worker A and worker B have shared knowledge about host and services
that are active on the network being monitored.
The proxy model extends to having multiple proxies when necessary for
performance reasons. It only adds one additional step for the Bro processes.
Each proxy connects to another proxy in a ring and the workers are shared
between them as evenly as possible. When a proxy receives some new bit of
state it will share that with its proxy, which is then shared around the
ring of proxies, and down to all of the workers. From a practical standpoint,
there are no rules of thumb established for the number of proxies
necessary for the number of workers they are serving. It is best to start
with a single proxy and add more if communication performance problems are
found.
Bro processes acting as proxies don't tend to be extremely hard on CPU
or memory and users frequently run proxy processes on the same physical
host as the manager.
Worker
******
The worker is the Bro process that sniffs network traffic and does protocol
analysis on the reassembled traffic streams. Most of the work of an active
cluster takes place on the workers and as such, the workers typically
represent the bulk of the Bro processes that are running in a cluster.
The fastest memory and CPU core speed you can afford is recommended
since all of the protocol parsing and most analysis will take place here.
There are no particular requirements for the disks in workers since almost all
logging is done remotely to the manager, and normally very little is written
to disk.
The rule of thumb we have followed recently is to allocate approximately 1
core for every 80Mbps of traffic that is being analyzed. However, this
estimate could be extremely traffic mix-specific. It has generally worked
for mixed traffic with many users and servers. For example, if your traffic
peaks around 2Gbps (combined) and you want to handle traffic at peak load,
you may want to have 26 cores available (2048 / 80 == 25.6). If the 80Mbps
estimate works for your traffic, this could be handled by 3 physical hosts
dedicated to being workers with each one containing dual 6-core processors.
Once a flow-based load balancer is put into place this model is extremely
easy to scale. It is recommended that you estimate the amount of
hardware you will need to fully analyze your traffic. If more is needed it's
relatively easy to increase the size of the cluster in most cases.
Frontend Options
----------------
There are many options for setting up a frontend flow distributor. In many
cases it is beneficial to do multiple stages of flow distribution
on the network and on the host.
Discrete hardware flow balancers
********************************
cPacket
^^^^^^^
If you are monitoring one or more 10G physical interfaces, the recommended
solution is to use either a cFlow or cVu device from cPacket because they
are used successfully at a number of sites. These devices will perform
layer-2 load balancing by rewriting the destination Ethernet MAC address
to cause each packet associated with a particular flow to have the same
destination MAC. The packets can then be passed directly to a monitoring
host where each worker has a BPF filter to limit its visibility to only that
stream of flows, or onward to a commodity switch to split the traffic out to
multiple 1G interfaces for the workers. This greatly reduces
costs since workers can use relatively inexpensive 1G interfaces.
OpenFlow Switches
^^^^^^^^^^^^^^^^^
We are currently exploring the use of OpenFlow-based switches to do flow-based
load balancing directly on the switch, which greatly reduces frontend
costs for many users. This document will be updated when we have more
information.
On host flow balancing
**********************
PF_RING
^^^^^^^
The PF_RING software for Linux has a "clustering" feature which will do
flow-based load balancing across a number of processes that are sniffing the
same interface. This allows you to easily take advantage of multiple
cores in a single physical host because Bro's main event loop is
single-threaded and can't natively utilize all of the cores. If you want to use
PF_RING, see the documentation on `how to configure Bro with PF_RING
<http://bro.org/documentation/load-balancing.html>`_.
Netmap
^^^^^^
FreeBSD has an in-progress project named Netmap which will enable flow-based
load balancing as well. When it becomes viable for real-world use, this
document will be updated.
Click! Software Router
^^^^^^^^^^^^^^^^^^^^^^
Click! can be used for flow-based load balancing with a simple configuration.
This solution is not recommended on Linux because of Bro's PF_RING support,
and it should be considered only a last resort on other operating systems,
since it adds substantial overhead from context switching back and forth
between kernel and userland several times per packet.

28
doc/components/index.rst Normal file
View file

@ -0,0 +1,28 @@
=============
Subcomponents
=============
The following are snapshots of documentation for components that come
with this version of Bro (|version|). Since they can also be used
independently, see the `download page
<http://bro.org/download/index.html>`_ for documentation of any
current, independent component releases.
.. toctree::
:maxdepth: 1
BinPAC - A protocol parser generator <binpac/README>
Broccoli - The Bro Client Communication Library (README) <broccoli/README>
Broccoli - User Manual <broccoli/broccoli-manual>
Broccoli Python Bindings <broccoli-python/README>
Broccoli Ruby Bindings <broccoli-ruby/README>
BroControl - Interactive Bro management shell <broctl/README>
Bro-Aux - Small auxiliary tools for Bro <bro-aux/README>
BTest - A unit testing framework <btest/README>
Capstats - Command-line packet statistic tool <capstats/README>
PySubnetTree - Python module for CIDR lookups <pysubnettree/README>
trace-summary - Script for generating break-downs of network traffic <trace-summary/README>
The `Broccoli API Reference <../broccoli-api/index.html>`_ may also be of
interest.

View file

@ -12,10 +12,33 @@
 import sys, os

+extensions = []
+
 # If extensions (or modules to document with autodoc) are in another directory,
 # add these directories to sys.path here. If the directory is relative to the
 # documentation root, use os.path.abspath to make it absolute, like shown here.
-sys.path.insert(0, os.path.abspath('sphinx-sources/ext'))
+sys.path.insert(0, os.path.abspath('sphinx_input/ext'))
+
+# ----- Begin of BTest configuration. -----
+btest = os.path.abspath("@CMAKE_SOURCE_DIR@/aux/btest")
+brocut = os.path.abspath("@CMAKE_SOURCE_DIR@/aux/bro-aux/bro-cut")
+bro = os.path.abspath("@CMAKE_SOURCE_DIR@/build/src")
+os.environ["PATH"] += (":%s:%s/sphinx:%s:%s" % (btest, btest, bro, brocut))
+sys.path.append(os.path.join(btest, "sphinx"))
+extensions += ["btest-sphinx"]
+
+btest_base="@CMAKE_SOURCE_DIR@/testing/btest"
+btest_tests="doc/sphinx"
+# ----- End of BTest configuration. -----
+
+# ----- Begin of Broxygen configuration. -----
+extensions += ["broxygen"]
+bro_binary = os.path.abspath("@CMAKE_SOURCE_DIR@/build/src/bro")
+broxygen_cache="@BROXYGEN_CACHE_DIR@"
+os.environ["BROPATH"] = "@BROPATH@"
+# ----- End of Broxygen configuration. -----

 # -- General configuration -----------------------------------------------------
@ -24,10 +47,13 @@ sys.path.insert(0, os.path.abspath('sphinx-sources/ext'))
 # Add any Sphinx extension module names here, as strings. They can be extensions
 # coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
-extensions = ['bro', 'rst_directive', 'sphinx.ext.todo', 'adapt-toc']
+extensions += ['bro', 'rst_directive', 'sphinx.ext.todo', 'adapt-toc']
+
+os.environ["BRO_SRC_ROOT"] = "@CMAKE_SOURCE_DIR@"
+os.environ["DOC_ROOT"] = "@CMAKE_SOURCE_DIR@/doc"

 # Add any paths that contain templates here, relative to this directory.
-templates_path = ['sphinx-sources/_templates', 'sphinx-sources/_static']
+templates_path = ['sphinx_input/_templates', 'sphinx_input/_static']

 # The suffix of source filenames.
 source_suffix = '.rst'
@ -40,7 +66,7 @@ master_doc = 'index'
 # General information about the project.
 project = u'Bro'
-copyright = u'2012, The Bro Project'
+copyright = u'2013, The Bro Project'

 # The version info for the project you're documenting, acts as replacement for
 # |version| and |release|, also used in various other places throughout the
@ -63,7 +89,7 @@ today_fmt = '%B %d, %Y'
 # List of patterns, relative to source directory, that match files and
 # directories to ignore when looking for source files.
-exclude_patterns = []
+exclude_patterns = [".#*"]

 # The reST default role (used for this markup: `text`) to use for all documents.
 #default_role = None
@ -121,7 +147,7 @@ html_theme_options = { }
 # Add any paths that contain custom static files (such as style sheets) here,
 # relative to this directory. They are copied after the builtin static files,
 # so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ['sphinx-sources/_static']
+html_static_path = ['sphinx_input/_static']

 # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
 # using the given strftime format.

263
doc/configuration/index.rst Normal file
View file

@ -0,0 +1,263 @@
.. _configuration:
=====================
Cluster Configuration
=====================
.. contents::
A *Bro Cluster* is a set of systems jointly analyzing the traffic of
a network link in a coordinated fashion. You can operate such a setup from
a central manager system easily using BroControl because BroControl
hides much of the complexity of the multi-machine installation.
This section gives examples of how to set up common cluster configurations
using BroControl. For a full reference on BroControl, see the
:doc:`BroControl <../components/broctl/README>` documentation.
Preparing to Set Up a Cluster
=============================
In this document we refer to the user account used to set up the cluster
as the "Bro user". When setting up a cluster the Bro user must be set up
on all hosts, and this user must have ssh access from the manager to all
machines in the cluster, and it must work without being prompted for a
password/passphrase (for example, using ssh public key authentication).
Also, on the worker nodes this user must have access to the target
network interface in promiscuous mode.
Additional storage must be available on all hosts under the same path,
which we will call the cluster's prefix path. We refer to this directory
as ``<prefix>``. If you build Bro from source, then ``<prefix>`` is
the directory specified with the ``--prefix`` configure option,
or ``/usr/local/bro`` by default. The Bro user must be able to either
create this directory or, where it already exists, must have write
permission inside this directory on all hosts.
When trying to decide how to configure the Bro nodes, keep in mind that
there can be multiple Bro instances running on the same host. For example,
it's possible to run a proxy and the manager on the same host. However, it is
recommended to run workers on a different machine than the manager because
workers can consume a lot of CPU resources. The maximum recommended
number of workers to run on a machine should be one or two less than
the number of CPU cores available on that machine. Using a load-balancing
method (such as PF_RING) along with CPU pinning can decrease the load on
the worker machines.
Basic Cluster Configuration
===========================
With all prerequisites in place, perform the following steps to set up
a Bro cluster (do this as the Bro user on the manager host only):
- Edit the BroControl configuration file, ``<prefix>/etc/broctl.cfg``,
and change the value of any BroControl options to be more suitable for
your environment. You will most likely want to change the value of
the ``MailTo`` and ``LogRotationInterval`` options. A complete
reference of all BroControl options can be found in the
:doc:`BroControl <../components/broctl/README>` documentation.
- Edit the BroControl node configuration file, ``<prefix>/etc/node.cfg``
to define where manager, proxies, and workers are to run. For a cluster
configuration, you must comment-out (or remove) the standalone node
in that file, and either uncomment or add node entries for each node
in your cluster (manager, proxy, and workers). For example, if you wanted
to run four Bro nodes (two workers, one proxy, and a manager) on a cluster
consisting of three machines, your cluster configuration would look like
this::
[manager]
type=manager
host=10.0.0.10
[proxy-1]
type=proxy
host=10.0.0.10
[worker-1]
type=worker
host=10.0.0.11
interface=eth0
[worker-2]
type=worker
host=10.0.0.12
interface=eth0
For a complete reference of all options that are allowed in the ``node.cfg``
file, see the :doc:`BroControl <../components/broctl/README>` documentation.
- Edit the network configuration file ``<prefix>/etc/networks.cfg``. This
file lists all of the networks which the cluster should consider as local
to the monitored environment.
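  For example, a site treating two RFC 1918 ranges as local might use entries
  like these (the networks and descriptions are illustrative; the format is one
  network per line, optionally followed by a description)::

      10.0.0.0/8          Company internal
      192.168.0.0/16      Lab networks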
- Install workers and proxies using BroControl::
> broctl install
- Some tasks need to be run on a regular basis. On the manager node,
insert a line like this into the crontab of the user running the
cluster::
0-59/5 * * * * <prefix>/bin/broctl cron
(Note: if you are editing the system crontab instead of a user's own
crontab, then you need to also specify the user which the command
will be run as. The username must be placed after the time fields
and before the broctl command.)
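  For example, with a hypothetical account named ``bro`` running the cluster,
  the system crontab entry would look like::

      0-59/5 * * * * bro <prefix>/bin/broctl cron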
Note that on some systems (FreeBSD in particular), the default PATH
for cron jobs does not include the directories where bash and python
are installed (the symptoms of this problem would be that "broctl cron"
works when run directly by the user, but does not work from a cron job).
To solve this problem, you would either need to create symlinks
to bash and python in a directory that is in the default PATH for
cron jobs, or specify a new PATH in the crontab.
PF_RING Cluster Configuration
=============================
`PF_RING <http://www.ntop.org/products/pf_ring/>`_ allows speeding up the
packet capture process by installing a new type of socket in Linux systems.
It supports 10Gbit hardware packet filtering using standard network adapters,
and user-space DNA (Direct NIC Access) for fast packet capture/transmission.
Installing PF_RING
^^^^^^^^^^^^^^^^^^
1. Download and install PF_RING for your system following the instructions
`here <http://www.ntop.org/get-started/download/#PF_RING>`_. The following
commands will install the PF_RING libraries and kernel module (replace
the version number 5.6.2 in this example with the version that you
downloaded)::
cd /usr/src
tar xvzf PF_RING-5.6.2.tar.gz
cd PF_RING-5.6.2/userland/lib
./configure --prefix=/opt/pfring
make install
cd ../libpcap
./configure --prefix=/opt/pfring
make install
cd ../tcpdump-4.1.1
./configure --prefix=/opt/pfring
make install
cd ../../kernel
make install
modprobe pf_ring enable_tx_capture=0 min_num_slots=32768
Refer to the documentation for your Linux distribution on how to load the
pf_ring module at boot time. You will need to install the PF_RING
library files and kernel module on all of the workers in your cluster.
2. Download the Bro source code.
3. Configure and install Bro using the following commands::
./configure --with-pcap=/opt/pfring
make
make install
4. Make sure Bro is correctly linked to the PF_RING libpcap libraries::
ldd /usr/local/bro/bin/bro | grep pcap
libpcap.so.1 => /opt/pfring/lib/libpcap.so.1 (0x00007fa6d7d24000)
5. Configure BroControl to use PF_RING (explained below).
6. Run "broctl install" on the manager. This command will install Bro and
all required scripts to the other machines in your cluster.
Using PF_RING
^^^^^^^^^^^^^
In order to use PF_RING, you need to specify the correct configuration
options for your worker nodes in BroControl's node configuration file.
Edit the ``node.cfg`` file and specify ``lb_method=pf_ring`` for each of
your worker nodes. Next, use the ``lb_procs`` node option to specify how
many Bro processes you'd like that worker node to run, and optionally pin
those processes to certain CPU cores with the ``pin_cpus`` option (CPU
numbering starts at zero). The correct ``pin_cpus`` setting to use is
dependent on your CPU architecture (Intel and AMD systems enumerate
processors in different ways). Using the wrong ``pin_cpus`` setting
can cause poor performance. Here is what a worker node entry should
look like when using PF_RING and CPU pinning::
[worker-1]
type=worker
host=10.0.0.50
interface=eth0
lb_method=pf_ring
lb_procs=10
pin_cpus=2,3,4,5,6,7,8,9,10,11
Using PF_RING+DNA with symmetric RSS
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
You must have a PF_RING+DNA license in order to do this. You can sniff
each packet only once.
1. Load the DNA NIC driver (i.e. ixgbe) on each worker host.
2. Run "ethtool -L dna0 combined 10" (this will establish 10 RSS queues
on your NIC) on each worker host. You must make sure that you set the
number of RSS queues to the same as the number you specify for the
lb_procs option in the node.cfg file.
3. On the manager, configure your worker(s) in node.cfg::
[worker-1]
type=worker
host=10.0.0.50
interface=dna0
lb_method=pf_ring
lb_procs=10
Using PF_RING+DNA with pfdnacluster_master
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
You must have a PF_RING+DNA license and a libzero license in order to do
this. You can load balance between multiple applications and sniff the
same packets multiple times with different tools.
1. Load the DNA NIC driver (i.e. ixgbe) on each worker host.
2. Run "ethtool -L dna0 1" (this will establish 1 RSS queues on your NIC)
on each worker host.
3. Run the pfdnacluster_master command on each worker host. For example::
pfdnacluster_master -c 21 -i dna0 -n 10
Make sure that your cluster ID (21 in this example) matches the interface
name you specify in the node.cfg file. Also make sure that the number
of processes you're balancing across (10 in this example) matches
the lb_procs option in the node.cfg file.
4. If you are load balancing to other processes, you can use the
pfringfirstappinstance variable in broctl.cfg to set the first
application instance that Bro should use. For example, if you are running
pfdnacluster_master with "-n 10,4" you would set
pfringfirstappinstance=4. Unfortunately that's still a global setting
in broctl.cfg at the moment but we may change that to something you can
set in node.cfg eventually.
5. On the manager, configure your worker(s) in node.cfg::
[worker-1]
type=worker
host=10.0.0.50
interface=dnacluster:21
lb_method=pf_ring
lb_procs=10

View file

@ -191,6 +191,10 @@ class BroNotices(Index):
    def generate(self, docnames=None):
        content = {}

+       if 'notices' not in self.domain.env.domaindata['bro']:
+           return content, False
+
        for n in self.domain.env.domaindata['bro']['notices']:
            modname = n[0].split("::")[0]
            entries = content.setdefault(modname, [])

317
doc/ext/broxygen.py Normal file
View file

@ -0,0 +1,317 @@
"""
Broxygen domain for Sphinx.
Adds directives that allow Sphinx to invoke Bro in order to generate script
reference documentation on the fly. The directives are:
broxygen:package
- Shows links to all scripts contained within matching package(s).
broxygen:package_index
- An index with links to matching package document(s).
broxygen:script
- Reference for matching script(s) (i.e. everything declared by the script).
broxygen:script_summary
- Shows link to matching script(s) with its summary-section comments.
broxygen:script_index
- An index with links to all matching scripts.
broxygen:proto_analyzer
- All protocol analyzers and their components (events/bifs, etc.)
broxygen:file_analyzer
- All file analyzers and their components (events/bifs, etc.)
"""
from sphinx.domains import Domain, ObjType
from sphinx.locale import l_
from docutils.parsers.rst.directives.misc import Include
App = None
def info(msg):
"""Use Sphinx builder to output a console message."""
global App
from sphinx.util.console import blue
App.builder.info(blue(msg))
def pattern_to_filename_component(pattern):
"""Replace certain characters in Broxygen config file target pattern.
Such that it can be used as part of a (sane) filename.
"""
return pattern.replace("/", ".").replace("*", "star")
def ensure_dir(path):
"""Should act like ``mkdir -p``."""
import os
import errno
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def generate_config(env, type, pattern):
"""Create a Broxygen config file for a particular target.
It can be used by Bro to generate reST docs for that target.
"""
import os
import tempfile
from sphinx.errors import SphinxError
work_dir = env.config.broxygen_cache
if not work_dir:
raise SphinxError("broxygen_cache not set in sphinx config file")
ensure_dir(work_dir)
prefix = "{0}-{1}-".format(type, pattern_to_filename_component(pattern))
(fd, cfg) = tempfile.mkstemp(suffix=".cfg", prefix=prefix, dir=work_dir)
generated_file = "{0}.rst".format(cfg)
config = "{0}\t{1}\t{2}".format(type, pattern, generated_file)
f = os.fdopen(fd, "w")
f.write(config)
f.close()
return (cfg, generated_file)
def generate_target(env, type, pattern):
"""Create a Broxygen target and build it.
For a target which hasn't been referenced by any other script, this function
creates an associated config file, then uses Bro with it to build the target,
and stores the target information in the build environment.
If a script references a target that's already found in the build
environment, the results of the previous build are re-used.
"""
app_data = env.domaindata["broxygen"]
if (type, pattern) in app_data["targets"]:
info("Broxygen has cached doc for target '{0} {1}'".format(
type, pattern))
return app_data["targets"]
(cfg, gend_file) = generate_config(env, type, pattern)
target = BroxygenTarget(type, pattern, cfg, gend_file)
app_data["targets"][(type, pattern)] = target
build_target(env, target)
info("Broxygen built target '{0} {1}'".format(type, pattern))
return target
def build_target(env, target):
"""Invoke a Bro process to build a Broxygen target."""
import os
import subprocess
path_to_bro = env.config.bro_binary
if not path_to_bro:
    from sphinx.errors import SphinxError
    raise SphinxError("'bro' not set in sphinx config file (path to bro)")
bro_cmd = "{0} -X {1} broxygen".format(path_to_bro, target.config_file)
cwd = os.getcwd()
os.chdir(os.path.dirname(target.config_file))
try:
subprocess.check_output(bro_cmd, stderr=subprocess.STDOUT, shell=True)
except subprocess.CalledProcessError as e:
from sphinx.errors import SphinxError
raise SphinxError(
"Command '{0}' returned non-zero exit status {1}: {2}".format(
e.cmd, e.returncode, e.output))
finally:
os.chdir(cwd)
class BroxygenTarget(object):
"""Some portion of reST documentation that Bro knows how to generate.
A target is identified by its type and pattern. E.g. type "script" and
pattern "broxygen/example.bro".
"""
def __init__(self, type, pattern, config_file, generated_file):
self.type = type
self.pattern = pattern
self.config_file = config_file
self.generated_file = generated_file
self.used_in_docs = set()
class BroxygenDirective(Include):
"""Base class for Broxygen directives.
It can use Bro to generate reST documentation on the fly and embed it in
the document at the location of the directive just like the ``.. include::``
directive. The only argument is a pattern to identify to Bro which
pieces of documentation it needs to create.
"""
required_arguments = 1
has_content = False
target_type = None
def run(self):
env = self.state.document.settings.env
info("Broxygen running .. {0}:: {1} in {2}".format(
self.name, self.arguments[0], env.docname))
target = generate_target(env, self.target_type, self.arguments[0])
target.used_in_docs.add(env.docname)
self.arguments = [target.generated_file]
return super(BroxygenDirective, self).run()
class PackageDirective(BroxygenDirective):
target_type = "package"
class PackageIndexDirective(BroxygenDirective):
target_type = "package_index"
class ScriptDirective(BroxygenDirective):
target_type = "script"
class ScriptSummaryDirective(BroxygenDirective):
target_type = "script_summary"
class ScriptIndexDirective(BroxygenDirective):
target_type = "script_index"
class ProtoAnalyzerDirective(BroxygenDirective):
target_type = "proto_analyzer"
class FileAnalyzerDirective(BroxygenDirective):
target_type = "file_analyzer"
class IdentifierDirective(BroxygenDirective):
target_type = "identifier"
class BroxygenDomain(Domain):
name = "broxygen"
label = "Broxygen"
object_types = {
"package": ObjType(l_("package")),
"package_index": ObjType(l_("package_index")),
"script": ObjType(l_("script")),
"script_summary": ObjType(l_("script_summary")),
"script_index": ObjType(l_("script_index")),
"proto_analyzer": ObjType(l_("proto_analyzer")),
"file_analyzer": ObjType(l_("file_analyzer")),
"identifier": ObjType(l_("identifier")),
}
directives = {
"package": PackageDirective,
"package_index": PackageIndexDirective,
"script": ScriptDirective,
"script_summary": ScriptSummaryDirective,
"script_index": ScriptIndexDirective,
"proto_analyzer": ProtoAnalyzerDirective,
"file_analyzer": FileAnalyzerDirective,
"identifier": IdentifierDirective,
}
roles = {}
initial_data = {
"targets": {}
}
def clear_doc(self, docname):
"""Update Broxygen targets referenced in docname.
If it's the last place the target was referenced, remove it from
the build environment and delete any generated config/reST files
associated with it from the cache.
"""
import os
stale_targets = []
for (type, pattern), target in self.data["targets"].items():
if docname in target.used_in_docs:
target.used_in_docs.remove(docname)
if not target.used_in_docs:
stale_targets.append(target)
for target in stale_targets:
del self.data["targets"][(target.type, target.pattern)]
os.remove(target.config_file)
os.remove(target.generated_file)
def get_objects(self):
"""No Broxygen-generated content is itself linkable/searchable."""
return []
def env_get_outdated_hook(app, env, added, changed, removed):
"""Check whether to re-read any documents referencing Broxygen targets.
To do that we have to ask Bro to rebuild each target and compare the
before and after modification times of the generated reST output file.
If Bro changed it, then the document containing the Broxygen directive
needs to be re-read.
"""
import os
reread = set()
for target in app.env.domaindata["broxygen"]["targets"].values():
before_mtime = os.stat(target.generated_file)
build_target(env, target)
after_mtime = os.stat(target.generated_file)
if after_mtime > before_mtime:
info("Broxygen target '{0} {1}' outdated".format(
target.type, target.pattern))
for docname in target.used_in_docs:
if docname not in removed:
info(" in document: {0}".format(docname))
reread.add(docname)
return list(reread)
def setup(app):
global App
App = app
app.add_domain(BroxygenDomain)
app.add_config_value("bro_binary", None, "env")
app.add_config_value("broxygen_cache", None, "env")
app.connect("env-get-outdated", env_get_outdated_hook)

View file

@ -1,180 +0,0 @@
==========================
Frequently Asked Questions
==========================
.. raw:: html
<div class="faq">
.. contents::
Installation and Configuration
==============================
How can I tune my operating system for best capture performance?
----------------------------------------------------------------
Here are some pointers to more information:
* Fabian Schneider's research on `high performance packet capture
<http://www.net.t-labs.tu-berlin.de/research/hppc>`_
* `NSMWiki <http://nsmwiki.org/Main_Page>`_ has page on
*Collecting Data*.
* An `IMC 2010 paper
<http://conferences.sigcomm.org/imc/2010/papers/p206.pdf>`_ by
Lothar Braun et. al evaluates packet capture performance on
commodity hardware
Are there any gotchas regarding interface configuration for live capture? Or why might I be seeing abnormally large packets much greater than interface MTU?
-------------------------------------------------------------------------------------------------------------------------------------------------------------
Some NICs offload the reassembly of traffic into "superpackets" so that
fewer packets are then passed up the stack (e.g. "TCP segmentation
offload", or "generic segmentation offload"). The result is that the
capturing application will observe packets much larger than the MTU size
of the interface they were captured from and may also interfere with the
maximum packet capture length, ``snaplen``, so it's a good idea to disable
an interface's offloading features.
You can use the ``ethtool`` program on Linux to view and disable
offloading features of an interface. See this page for more explicit
directions:
http://securityonion.blogspot.com/2011/10/when-is-full-packet-capture-not-full.html
What does an error message like ``internal error: NB-DNS error`` mean?
----------------------------------------------------------------------
That often means that DNS is not set up correctly on the system
running Bro. Try verifying from the command line that DNS lookups
work, e.g., ``host www.google.com``.
I am using OpenBSD and having problems installing Bro?
------------------------------------------------------
One potential issue is that the top-level Makefile may not work with
OpenBSD's default make program, in which case you can either install
the ``gmake`` package and use it instead or first change into the
``build/`` directory before doing either ``make`` or ``make install``
such that the CMake-generated Makefile's are used directly.
Generally, please note that we do not regularly test OpenBSD builds.
We appreciate any patches that improve Bro's support for this
platform.
How do BroControl options affect Bro script variables?
------------------------------------------------------
Some (but not all) BroControl options override a corresponding Bro script variable.
For example, setting the BroControl option "LogRotationInterval" will override
the value of the Bro script variable "Log::default_rotation_interval".
See the :doc:`BroControl Documentation <components/broctl/README>` to find out
which BroControl options override Bro script variables, and for more discussion
on site-specific customization.
Usage
=====
How can I identify backscatter?
-------------------------------
Identifying backscatter via connections labeled as ``OTH`` is not a reliable
means to detect backscatter. Backscatter is however visible by interpreting
the contents of the ``history`` field in the ``conn.log`` file. The basic idea
is to watch for connections that never had an initial ``SYN`` but started
instead with a ``SYN-ACK`` or ``RST`` (though this latter generally is just
discarded). Here are some history fields which provide backscatter examples:
``hAFf``, ``r``. Refer to the conn protocol analysis scripts to interpret the
individual character meanings in the history field.
Is there help for understanding Bro's resource consumption?
-----------------------------------------------------------
There are two scripts that collect statistics on resource usage:
``misc/stats.bro`` and ``misc/profiling.bro``. The former is quite
lightweight, while the latter should only be used for debugging.
How can I capture packets as an unprivileged user?
--------------------------------------------------
Normally, unprivileged users cannot capture packets from a network interface,
which means they would not be able to use Bro to read/analyze live traffic.
However, there are operating system specific ways to enable packet capture
permission for non-root users, which is worth doing in the context of using
Bro to monitor live traffic.
With Linux Capabilities
^^^^^^^^^^^^^^^^^^^^^^^
Fully implemented since Linux kernel 2.6.24, capabilities are a way of
parceling superuser privileges into distinct units. Attach capabilities
required to capture packets to the ``bro`` executable file like this:
.. console::
sudo setcap cap_net_raw,cap_net_admin=eip /path/to/bro
Now any unprivileged user should have the capability to capture packets
using Bro provided that they have the traditional file permissions to
read/execute the ``bro`` binary.
With BPF Devices
^^^^^^^^^^^^^^^^
Systems using Berkeley Packet Filter (BPF) (e.g. FreeBSD & Mac OS X)
can allow users with read access to a BPF device to capture packets from
it using libpcap.
* Example of manually changing BPF device permissions to allow users in
the ``admin`` group to capture packets:
.. console::
sudo chgrp admin /dev/bpf*
sudo chmod g+r /dev/bpf*
* Example of configuring devfs to set permissions of BPF devices, adding
entries to ``/etc/devfs.conf`` to grant ``admin`` group permission to
capture packets:
.. console::
sudo sh -c 'echo "own bpf root:admin" >> /etc/devfs.conf'
sudo sh -c 'echo "perm bpf 0640" >> /etc/devfs.conf'
sudo service devfs restart
.. note:: As of Mac OS X 10.6, the BPF device is on devfs, but the used version
of devfs isn't capable of setting the device permissions. The permissions
can be changed manually, but they will not survive a reboot.
Why isn't Bro producing the logs I expect? (A Note About Checksums)
-------------------------------------------------------------------
Normally, Bro's event engine will discard packets which don't have valid
checksums. This can be a problem if one wants to analyze locally
generated/captured traffic on a system that offloads checksumming to the
network adapter. In that case, all transmitted/captured packets will have
bad checksums because they haven't yet been calculated by the NIC, thus
such packets will not undergo analysis defined in Bro policy scripts as they
normally would. Bad checksums in traces may also be a result of some packet
alteration tools.
Bro has two options to workaround such situations and ignore bad checksums:
1) The ``-C`` command line option to ``bro``.
2) An option called ``ignore_checksums`` that can be redefined at the
policy script layer (e.g. in your ``$PREFIX/share/bro/site/local.bro``):
.. code:: bro
redef ignore_checksums = T;
The other alternative is to disable checksum offloading for your
network adapter, but this is not always possible or desirable.
.. raw:: html
</div>

View file

@ -1,3 +1,6 @@
.. _file-analysis-framework:
=============
File Analysis
=============
@ -31,40 +34,13 @@ some information about the file such as which network
:bro:see:`connection` and protocol are transporting the file, how many
bytes have been transferred so far, and its MIME type.

-.. code:: bro
-
-    event connection_state_remove(c: connection)
-        {
-        print "connection_state_remove";
-        print c$uid;
-        print c$id;
-        for ( s in c$service )
-            print s;
-        }
-
-    event file_state_remove(f: fa_file)
-        {
-        print "file_state_remove";
-        print f$id;
-        for ( cid in f$conns )
-            {
-            print f$conns[cid]$uid;
-            print cid;
-            }
-        print f$source;
-        }
-
-might give output like::
-
-    file_state_remove
-    Cx92a0ym5R8
-    REs2LQfVW2j
-    [orig_h=10.0.0.7, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp]
-    HTTP
-    connection_state_remove
-    REs2LQfVW2j
-    [orig_h=10.0.0.7, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp]
-    HTTP
+Here's a simple example:
+
+.. btest-include:: ${DOC_ROOT}/frameworks/file_analysis_01.bro
+
+.. btest:: file-analysis-01
+
+   @TEST-EXEC: btest-rst-cmd bro -r ${TRACES}/http/get.trace ${DOC_ROOT}/frameworks/file_analysis_01.bro

This doesn't perform any interesting analysis yet, but does highlight
the similarity between analysis of connections and files. Connections
@ -82,57 +58,47 @@ attached, they start receiving the contents of the file as Bro extracts
it from an ongoing network connection. What they do with the file
contents is up to the particular file analyzer implementation, but
they'll typically either report further information about the file via
-events (e.g. :bro:see:`FileAnalysis::ANALYZER_MD5` will report the
+events (e.g. :bro:see:`Files::ANALYZER_MD5` will report the
file's MD5 checksum via :bro:see:`file_hash` once calculated) or they'll
-have some side effect (e.g. :bro:see:`FileAnalysis::ANALYZER_EXTRACT`
+have some side effect (e.g. :bro:see:`Files::ANALYZER_EXTRACT`
will write the contents of the file out to the local file system).

In the future there may be file analyzers that automatically attach to
files based on heuristics, similar to the Dynamic Protocol Detection
(DPD) framework for connections, but many will always require an
-explicit attachment decision:
-
-.. code:: bro
-
-    event file_new(f: fa_file)
-        {
-        print "new file", f$id;
-        if ( f?$mime_type && f$mime_type == "text/plain" )
-            FileAnalysis::add_analyzer(f, [$tag=FileAnalysis::ANALYZER_MD5]);
-        }
-
-    event file_hash(f: fa_file, kind: string, hash: string)
-        {
-        print "file_hash", f$id, kind, hash;
-        }
-
-this script calculates MD5s for all plain text files and might give
-output::
-
-    new file, Cx92a0ym5R8
-    file_hash, Cx92a0ym5R8, md5, 397168fd09991a0e712254df7bc639ac
+explicit attachment decision.
+
+Here's a simple example of how to use the MD5 file analyzer to
+calculate the MD5 of plain text files:
+
+.. btest-include:: ${DOC_ROOT}/frameworks/file_analysis_02.bro
+
+.. btest:: file-analysis-02
+
+   @TEST-EXEC: btest-rst-cmd bro -r ${TRACES}/http/get.trace ${DOC_ROOT}/frameworks/file_analysis_02.bro

Some file analyzers might have tunable parameters that need to be
-specified in the call to :bro:see:`FileAnalysis::add_analyzer`:
+specified in the call to :bro:see:`Files::add_analyzer`:

.. code:: bro

    event file_new(f: fa_file)
        {
-       FileAnalysis::add_analyzer(f, [$tag=FileAnalysis::ANALYZER_EXTRACT,
-                                      $extract_filename="./myfile"]);
+       Files::add_analyzer(f, Files::ANALYZER_EXTRACT,
+                           [$extract_filename="myfile"]);
        }

In this case, the file extraction analyzer doesn't generate any further
-events, but does have the side effect of writing out the file contents
-to the local file system at the specified location of ``./myfile``. Of
-course, for a network with more than a single file being transferred,
-it's probably preferable to specify a different extraction path for each
-file, unlike this example.
+events, but does have the effect of writing out the file contents to the
+local file system at the location resulting from the concatenation of
+the path specified by :bro:see:`FileExtract::prefix` and the string,
+``myfile``. Of course, for a network with more than a single file being
+transferred, it's probably preferable to specify a different extraction
+path for each file, unlike this example.

Regardless of which file analyzers end up acting on a file, general
information about the file (e.g. size, time of last data transferred,
-MIME type, etc.) are logged in ``file_analysis.log``.
+MIME type, etc.) are logged in ``files.log``.

Input Framework Integration
===========================
@ -143,41 +109,19 @@ in the same way it analyzes files that it sees coming over traffic from
a network interface it's monitoring. It only requires a call to
:bro:see:`Input::add_analysis`:

-.. code:: bro
-
-    redef exit_only_after_terminate = T;
-
-    event file_new(f: fa_file)
-        {
-        print "new file", f$id;
-        FileAnalysis::add_analyzer(f, [$tag=FileAnalysis::ANALYZER_MD5]);
-        }
-
-    event file_state_remove(f: fa_file)
-        {
-        Input::remove(f$source);
-        terminate();
-        }
-
-    event file_hash(f: fa_file, kind: string, hash: string)
-        {
-        print "file_hash", f$id, kind, hash;
-        }
-
-    event bro_init()
-        {
-        local source: string = "./myfile";
-        Input::add_analysis([$source=source, $name=source]);
-        }
+.. btest-include:: ${DOC_ROOT}/frameworks/file_analysis_03.bro

Note that the "source" field of :bro:see:`fa_file` corresponds to the
"name" field of :bro:see:`Input::AnalysisDescription` since that is what
the input framework uses to uniquely identify an input stream.

-The output of the above script may be::
-
-    new file, G1fS2xthS4l
-    file_hash, G1fS2xthS4l, md5, 54098b367d2e87b078671fad4afb9dbb
+The output of the above script may be (assuming a file called "myfile"
+exists):
+
+.. btest:: file-analysis-03
+
+   @TEST-EXEC: echo "Hello world" > myfile
+   @TEST-EXEC: btest-rst-cmd bro ${DOC_ROOT}/frameworks/file_analysis_03.bro

Nothing that special, but it at least verifies the MD5 file analyzer
saw all the bytes of the input file and calculated the checksum

View file

@ -0,0 +1,20 @@
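# Print some state about each connection and each file once Bro is done with
# them, to highlight the parallel between connection and file analysis.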
event connection_state_remove(c: connection)
{
print "connection_state_remove";
print c$uid;
print c$id;
for ( s in c$service )
print s;
}
event file_state_remove(f: fa_file)
{
print "file_state_remove";
print f$id;
for ( cid in f$conns )
{
print f$conns[cid]$uid;
print cid;
}
print f$source;
}

View file

@ -0,0 +1,11 @@
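# Attach the MD5 file analyzer to any file identified as plain text and print
# the hash that the file_hash event reports once it has been computed.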
event file_new(f: fa_file)
{
print "new file", f$id;
if ( f?$mime_type && f$mime_type == "text/plain" )
Files::add_analyzer(f, Files::ANALYZER_MD5);
}
event file_hash(f: fa_file, kind: string, hash: string)
{
print "file_hash", f$id, kind, hash;
}

View file

@ -0,0 +1,25 @@
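# Feed the local file "./myfile" through the file analysis framework via the
# input framework, attach the MD5 analyzer, print the resulting hash, and
# terminate once the file has been fully processed.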
redef exit_only_after_terminate = T;
event file_new(f: fa_file)
{
print "new file", f$id;
Files::add_analyzer(f, Files::ANALYZER_MD5);
}
event file_state_remove(f: fa_file)
{
print "file_state_remove";
Input::remove(f$source);
terminate();
}
event file_hash(f: fa_file, kind: string, hash: string)
{
print "file_hash", f$id, kind, hash;
}
event bro_init()
{
local source: string = "./myfile";
Input::add_analysis([$source=source, $name=source]);
}

View file

@ -1,4 +1,6 @@
.. _geolocation:
===========
GeoLocation
===========
@ -9,10 +11,41 @@ GeoLocation
to find the geographic location for an IP address. Bro has support
for the `GeoIP library <http://www.maxmind.com/app/c>`__ at the
policy script level beginning with release 1.3 to account for this
-need.
+need. To use this functionality, you need to first install the libGeoIP
+software, and then install the GeoLite city database before building
+Bro.

.. contents::

+Install libGeoIP
+----------------
+
+* FreeBSD:
+
+  .. console::
+
+      sudo pkg_add -r GeoIP
+
+* RPM/RedHat-based Linux:
+
+  .. console::
+
+      sudo yum install GeoIP-devel
+
+* DEB/Debian-based Linux:
+
+  .. console::
+
+      sudo apt-get install libgeoip-dev
+
+* Mac OS X:
+
+  Vanilla OS X installations don't ship with libGeoIP, but if
+  installed from your preferred package management system (e.g.
+  MacPorts, Fink, or Homebrew), they should be automatically detected
+  and Bro will compile against them.
+
GeoIPLite Database Installation
------------------------------------
@ -20,39 +53,23 @@ A country database for GeoIPLite is included when you do the C API
install, but for Bro, we are using the city database which includes
cities and regions in addition to countries.

-`Download <http://www.maxmind.com/app/geolitecity>`__ the geolitecity
-binary database and follow the directions to install it.
+`Download <http://www.maxmind.com/app/geolitecity>`__ the GeoLite city
+binary database.

-FreeBSD Quick Install
----------------------
-
-.. console::
-
-    pkg_add -r GeoIP
-    wget http://geolite.maxmind.com/download/geoip/database/GeoLiteCity.dat.gz
-    gunzip GeoLiteCity.dat.gz
-    mv GeoLiteCity.dat /usr/local/share/GeoIP/GeoIPCity.dat
-
-    # Set your environment correctly before running Bro's configure script
-    export CFLAGS=-I/usr/local/include
-    export LDFLAGS=-L/usr/local/lib
-
-CentOS Quick Install
---------------------
-
-.. console::
-
-    yum install GeoIP-devel
-    wget http://geolite.maxmind.com/download/geoip/database/GeoLiteCity.dat.gz
-    gunzip GeoLiteCity.dat.gz
-    mkdir -p /var/lib/GeoIP/
-    mv GeoLiteCity.dat /var/lib/GeoIP/GeoIPCity.dat
-
-    # Set your environment correctly before running Bro's configure script
-    export CFLAGS=-I/usr/local/include
-    export LDFLAGS=-L/usr/local/lib
+.. console::
+
+    wget http://geolite.maxmind.com/download/geoip/database/GeoLiteCity.dat.gz
+    gunzip GeoLiteCity.dat.gz
+
+Next, the file needs to be put in the database directory. This directory
+should already exist and will vary depending on which platform and package
+you are using. For FreeBSD, use ``/usr/local/share/GeoIP``. For Linux,
+use ``/usr/share/GeoIP`` or ``/var/lib/GeoIP`` (choose whichever one
+already exists).
+
+.. console::
+
+    mv GeoLiteCity.dat <path_to_database_dir>/GeoIPCity.dat

Usage
@ -65,8 +82,8 @@ functionality:

    function lookup_location(a:addr): geo_location

-There is also the ``geo_location`` data structure that is returned
-from the ``lookup_location`` function:
+There is also the :bro:see:`geo_location` data structure that is returned
+from the :bro:see:`lookup_location` function:

.. code:: bro

17
doc/frameworks/index.rst Normal file
View file

@ -0,0 +1,17 @@
==========
Frameworks
==========
.. toctree::
:maxdepth: 1
file-analysis
geoip
input
intel
logging
notice
signatures
sumstats

View file

@ -1,6 +1,9 @@
-==============================================
-Loading Data into Bro with the Input Framework
-==============================================
+.. _framework-input:
+
+===============
+Input Framework
+===============

.. rst-class:: opening
@ -259,8 +262,13 @@ to optimize the speed of the input framework. It can generate arbitrary
amounts of semi-random data in all Bro data types supported by the input
framework.

-In the future, the input framework will get support for new data sources
-like, for example, different databases.
+Currently, Bro supports the following readers in addition to the
+aforementioned ones:
+
+.. toctree::
+   :maxdepth: 1
+
+   logging-input-sqlite

Add_table options
-----------------

View file

@ -1,5 +1,7 @@
-Intel Framework
-===============
+======================
+Intelligence Framework
+======================

Intro
-----
@ -27,10 +29,7 @@ Quick Start
Load the package of scripts that sends data into the Intelligence
Framework to be checked by loading this script in local.bro::

-    @load policy/frameworks/intel
+    @load policy/frameworks/intel/seen

-(TODO: find some good mechanism for getting setup with good data
-quickly)

Refer to the "Loading Intelligence" section below to see the format
for Intelligence Framework text files, then load those text files with
@ -60,17 +59,15 @@ intelligence framework has distribution mechanisms which will push
data out to all of the nodes that need it.

Here is an example of the intelligence data format. Note that all
-whitespace separators are literal tabs and fields containing only a
-hyphen are considered to be null values.::
+whitespace field separators are literal tabs and fields containing only a
+hyphen are considered to be null values. ::

-    #fields host    net     str     str_type        meta.source     meta.desc       meta.url
-    1.2.3.4 -       -       -       source1 Sending phishing email  http://source1.com/badhosts/1.2.3.4
-    -       31.131.248.0/21 -       -       spamhaus-drop   SBL154982       -
-    -       -       a.b.com Intel::DOMAIN   source2 Name used for data exfiltration -
+    #fields indicator       indicator_type  meta.source     meta.desc       meta.url
+    1.2.3.4 Intel::ADDR     source1 Sending phishing email  http://source1.com/badhosts/1.2.3.4
+    a.b.com Intel::DOMAIN   source2 Name used for data exfiltration -

-For more examples of built in `str_type` values, please refer to the
-autogenerated documentation for the intelligence framework (TODO:
-figure out how to do this link).
+For a list of all built-in `indicator_type` values, please refer to the
+documentation of :bro:see:`Intel::Type`.

To load the data once files are created, use the following example
code to define files to load with your own file names of course::
@ -90,8 +87,7 @@ When some bit of data is extracted (such as an email address in the
"From" header in a message over SMTP), the Intelligence Framework
needs to be informed that this data was discovered and its presence
should be checked within the intelligence data set. This is
-accomplished through the Intel::seen (TODO: do a reference link)
-function.
+accomplished through the :bro:see:`Intel::seen` function.

Typically users won't need to work with this function due to built in
hook scripts that Bro ships with that will "see" data and send it into
@ -99,14 +95,14 @@ the intelligence framework. A user may only need to load the entire
package of hook scripts as a module or pick and choose specific
scripts to load. Keep in mind that as more data is sent into the
intelligence framework, the CPU load consumed by Bro will increase
-depending on how many times the Intel::seen function is being called
-which is heavily traffic dependent.
+depending on how many times the :bro:see:`Intel::seen` function is
+being called, which is heavily traffic dependent.

The full package of hook scripts that Bro ships with for sending this
"seen" data into the intelligence framework can be loaded by adding
this line to local.bro::

-    @load policy/frameworks/intel
+    @load policy/frameworks/intel/seen

Intelligence Matches
********************
@ -114,12 +110,12 @@ Intelligence Matches
Against all hopes, most networks will eventually have a hit on
intelligence data which could indicate a possible compromise or other
unwanted activity. The Intelligence Framework provides an event that
-is generated whenever a match is discovered named Intel::match (TODO:
-make a link to inline docs). Due to design restrictions placed upon
+is generated whenever a match is discovered, named :bro:see:`Intel::match`.
+Due to design restrictions placed upon
the intelligence framework, there is no assurance as to where this
event will be generated. It could be generated on the worker where
the data was seen or on the manager. When the ``Intel::match`` event is
handled, only the data given as event arguments to the event can be
assured since the host where the data was seen may not be where
``Intel::match`` is handled.

View file

@ -104,7 +104,7 @@ code like this to your ``local.bro``:
    }

Bro's DataSeries writer comes with a few tuning options, see
-:doc:`scripts/base/frameworks/logging/writers/dataseries`.
+:doc:`/scripts/base/frameworks/logging/writers/dataseries.bro`.

Working with DataSeries
=======================

View file

@ -25,18 +25,18 @@ respond successfully to the insertion request.
Installing ElasticSearch
------------------------

-Download the latest version from: <http://www.elasticsearch.org/download/>.
+Download the latest version from: http://www.elasticsearch.org/download/.
Once extracted, start ElasticSearch with::

    # ./bin/elasticsearch

For more detailed information, refer to the ElasticSearch installation
-documentation: http://www.elasticsearch.org/guide/reference/setup/installation.html
+documentation: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/setup.html

Compiling Bro with ElasticSearch Support
----------------------------------------

-First, ensure that you have libcurl installed the run configure.::
+First, ensure that you have libcurl installed then run configure::

    # ./configure
    [...]
@ -51,9 +51,9 @@ First, ensure that you have libcurl installed the run configure.::
Activating ElasticSearch
------------------------

-The easiest way to enable ElasticSearch output is to load the tuning/logs-to-
-elasticsearch.bro script. If you are using BroControl, the following line in
-local.bro will enable it.
+The easiest way to enable ElasticSearch output is to load the
+tuning/logs-to-elasticsearch.bro script. If you are using BroControl,
+the following line in local.bro will enable it:

.. console::
@ -76,7 +76,7 @@ A common problem encountered with ElasticSearch is too many files being held
open. The ElasticSearch website has some suggestions on how to increase the
open file limit.

-- http://www.elasticsearch.org/tutorials/2011/04/06/too-many-open-files.html
+- http://www.elasticsearch.org/tutorials/too-many-open-files/

TODO
----

View file

@ -0,0 +1,166 @@
============================================
Logging To and Reading From SQLite Databases
============================================
.. rst-class:: opening
Starting with version 2.2, Bro features a SQLite logging writer
as well as a SQLite input reader. SQLite is a simple, file-based,
widely used SQL database system. Using SQLite allows Bro to write
and access data in a format that is easy to exchange with
other applications. Due to the transactional nature of SQLite,
databases can be used by several applications simultaneously. Hence,
they can, for example, be used to make data that changes regularly available
to Bro on a continuing basis.
.. contents::
Warning
=======
In contrast to the ASCII reader and writer, the SQLite plugins have not yet
seen extensive use in production environments. While we are not aware
of any issues with them, we urge caution when using them
in production. There could be lingering issues which only occur
when the plugins are used with large amounts of data or in high-load environments.
Logging Data into SQLite Databases
==================================
Logging support for SQLite is available in all Bro installations starting with
version 2.2. There is no need to load any additional scripts or for any compile-time
configurations.
Sending data from existing logging streams to SQLite is rather straightforward. You
have to define a filter which specifies SQLite as the writer.
The following example code adds SQLite as a filter for the connection log:
.. btest-include:: ${DOC_ROOT}/frameworks/sqlite-conn-filter.bro
.. btest:: sqlite-conn-filter-check
# Make sure this parses correctly at least.
@TEST-EXEC: bro ${DOC_ROOT}/frameworks/sqlite-conn-filter.bro
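A filter of this kind looks roughly as follows (a minimal sketch; the example
script included above via ``btest-include`` may differ in details such as the
exact path or additional writer options):

.. code:: bro

    event bro_init()
        {
        local filter: Log::Filter = [$name="sqlite",
                                     $path="/var/db/conn",
                                     $writer=Log::WRITER_SQLITE];
        Log::add_filter(Conn::LOG, filter);
        }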
Bro will create the database file ``/var/db/conn.sqlite``, if it does not already exist.
It will also create a table with the name ``conn`` (if it does not exist) and start
appending connection information to the table.
At the moment, SQLite databases are not rotated the same way ASCII log-files are. You
have to take care to create them in an adequate location.
If you examine the resulting SQLite database, the schema will contain the same fields
that are present in the ASCII log files::
# sqlite3 /var/db/conn.sqlite
SQLite version 3.8.0.2 2013-09-03 17:11:13
Enter ".help" for instructions
Enter SQL statements terminated with a ";"
sqlite> .schema
CREATE TABLE conn (
'ts' double precision,
'uid' text,
'id.orig_h' text,
'id.orig_p' integer,
...
Note that the ASCII ``conn.log`` will still be created. To disable the ASCII writer for a
log stream, you can remove the default filter:
.. code:: bro
Log::remove_filter(Conn::LOG, "default");
To create a custom SQLite log file, you have to create a new log stream that contains
just the information you want to commit to the database. Please refer to the
:ref:`framework-logging` documentation on how to create custom log streams.
Reading Data from SQLite Databases
==================================
Like logging support, support for reading data from SQLite databases is built into Bro starting
with version 2.2.
Just as with the text-based input readers (please refer to the :ref:`framework-input`
documentation for basic information on how to use the input framework), the SQLite reader
can be used to read data - in this case the result of SQL queries - into tables or into events.
Reading Data into Tables
------------------------
To read data from a SQLite database, we first have to tell Bro how the resulting
data will be structured. For this example, we assume a SQLite database
which contains host IP addresses and the user accounts that are allowed to log into each
machine.
The SQLite commands to create the schema are as follows::
create table machines_to_users (
host text unique not null,
users text not null);
insert into machines_to_users values ('192.168.17.1', 'bernhard,matthias,seth');
insert into machines_to_users values ('192.168.17.2', 'bernhard');
insert into machines_to_users values ('192.168.17.3', 'seth,matthias');
After creating a file called ``hosts.sqlite`` with this content, we can read the resulting table
into Bro:
.. btest-include:: ${DOC_ROOT}/frameworks/sqlite-read-table.bro
.. btest:: sqlite-read-table-check
# Make sure this parses correctly at least.
@TEST-EXEC: bro ${DOC_ROOT}/frameworks/sqlite-read-table.bro
Afterwards, that table can be used to check logins into hosts against the available
user list.
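For instance, a small hypothetical helper along these lines could perform that
check (the function name is made up; ``hostslist`` is the table populated in the
example above):

.. code:: bro

    # Returns T if the given user is allowed to log into the given host,
    # according to the data imported from hosts.sqlite.
    function login_allowed(host: addr, user: string): bool
        {
        if ( host !in hostslist )
            return F;

        return user in hostslist[host]$users;
        }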
Turning Data into Events
------------------------
The second mode is to use the SQLite reader to output the input data as events. Typically there
are two reasons to do this. First, the structure of the input data may be too complicated
for a direct table import. In this case, the data can be read into an event which can then
create the necessary data structures in Bro in scriptland.
The second reason is that the dataset is too big to hold in memory. In this case, the checks
can be performed on demand, when Bro encounters a situation where it needs additional information.
An example of this would be a huge internal database of malware hashes. Live database queries
could then be used to check downloads against the database as they occur.
The SQLite commands to create the schema are as follows::
create table malware_hashes (
hash text unique not null,
description text not null);
insert into malware_hashes values ('86f7e437faa5a7fce15d1ddcb9eaeaea377667b8', 'malware a');
insert into malware_hashes values ('e9d71f5ee7c92d6dc9e92ffdad17b8bd49418f98', 'malware b');
insert into malware_hashes values ('84a516841ba77a5b4648de2cd0dfcb30ea46dbb4', 'malware c');
insert into malware_hashes values ('3c363836cf4e16666669a25da280a1865c2d2874', 'malware d');
insert into malware_hashes values ('58e6b3a414a1e090dfc6029add0f3555ccba127f', 'malware e');
insert into malware_hashes values ('4a0a19218e082a343a1b17e5333409af9d98f0f5', 'malware f');
insert into malware_hashes values ('54fd1711209fb1c0781092374132c66e79e2241b', 'malware g');
insert into malware_hashes values ('27d5482eebd075de44389774fce28c69f45c8a75', 'malware h');
insert into malware_hashes values ('73f45106968ff8dc51fba105fa91306af1ff6666', 'ftp-trace');
The following code uses the file analysis framework to get the SHA1 hashes of files that are
transmitted over the network. For each hash, a SQL query is run against SQLite. If the query
returns a result, we have a hit against our malware database and output the matching hash.
.. btest-include:: ${DOC_ROOT}/frameworks/sqlite-read-events.bro
.. btest:: sqlite-read-events-check
# Make sure this parses correctly at least.
@TEST-EXEC: bro ${DOC_ROOT}/frameworks/sqlite-read-events.bro
If you run this script against the trace in ``testing/btest/Traces/ftp/ipv4.trace``, you
will get one hit.

View file

@ -1,6 +1,9 @@
==========================
Customizing Bro's Logging .. _framework-logging:
==========================
=================
Logging Framework
=================
.. rst-class:: opening .. rst-class:: opening
@ -45,7 +48,7 @@ Basics
The data fields that a stream records are defined by a record type The data fields that a stream records are defined by a record type
specified when it is created. Let's look at the script generating Bro's specified when it is created. Let's look at the script generating Bro's
connection summaries as an example, connection summaries as an example,
:doc:`scripts/base/protocols/conn/main`. It defines a record :doc:`/scripts/base/protocols/conn/main.bro`. It defines a record
:bro:type:`Conn::Info` that lists all the fields that go into :bro:type:`Conn::Info` that lists all the fields that go into
``conn.log``, each marked with a ``&log`` attribute indicating that it ``conn.log``, each marked with a ``&log`` attribute indicating that it
is part of the information written out. To write a log record, the is part of the information written out. To write a log record, the
@ -89,7 +92,8 @@ Note the fields that are set for the filter:
are generated by taking the stream's ID and munging it slightly. are generated by taking the stream's ID and munging it slightly.
:bro:enum:`Conn::LOG` is converted into ``conn``, :bro:enum:`Conn::LOG` is converted into ``conn``,
:bro:enum:`PacketFilter::LOG` is converted into :bro:enum:`PacketFilter::LOG` is converted into
``packet_filter``. ``packet_filter``, and :bro:enum:`Known::CERTS_LOG` is
converted into ``known_certs``.
``include`` ``include``
A set limiting the fields to the ones given. The names A set limiting the fields to the ones given. The names
@ -305,7 +309,7 @@ ASCII Writer Configuration
-------------------------- --------------------------
The ASCII writer has a number of options for customizing the format of The ASCII writer has a number of options for customizing the format of
its output, see :doc:`scripts/base/frameworks/logging/writers/ascii`. its output, see :doc:`/scripts/base/frameworks/logging/writers/ascii.bro`.
Adding Streams Adding Streams
============== ==============
@ -365,7 +369,7 @@ save the logged ``Foo::Info`` record into the connection record:
} }
See the existing scripts for how to work with such a new connection See the existing scripts for how to work with such a new connection
field. A simple example is :doc:`scripts/base/protocols/syslog/main`. field. A simple example is :doc:`/scripts/base/protocols/syslog/main.bro`.
When you are developing scripts that add data to the :bro:type:`connection` When you are developing scripts that add data to the :bro:type:`connection`
record, care must be given to when and how long data is stored. record, care must be given to when and how long data is stored.
@ -383,3 +387,4 @@ Bro supports the following output formats other than ASCII:
logging-dataseries logging-dataseries
logging-elasticsearch logging-elasticsearch
logging-input-sqlite

View file

@ -1,4 +1,6 @@
.. _notice-framework:
Notice Framework Notice Framework
================ ================
@ -98,9 +100,9 @@ type :bro:see:`SSH::Password_Guessing` if the server is 10.0.0.1:
.. note:: .. note::
Keep in mind that the semantics of the SSH::Password_Guessing notice are Keep in mind that the semantics of the :bro:see:`SSH::Password_Guessing`
such that it is only raised when Bro heuristically detects a failed notice are such that it is only raised when Bro heuristically detects
login. a failed login.
Hooks can also have priorities applied to order their execution like events Hooks can also have priorities applied to order their execution like events
with a default priority of 0. Greater values are executed first. Setting with a default priority of 0. Greater values are executed first. Setting
@ -283,7 +285,7 @@ information to suppress duplicates for a configurable period of time.
The ``$identifier`` field is typically comprised of several pieces of The ``$identifier`` field is typically comprised of several pieces of
data related to the notice that when combined represent a unique data related to the notice that when combined represent a unique
instance of that notice. Here is an example of the script instance of that notice. Here is an example of the script
:doc:`scripts/policy/protocols/ssl/validate-certs` raising a notice :doc:`/scripts/policy/protocols/ssl/validate-certs.bro` raising a notice
for session negotiations where the certificate or certificate chain did for session negotiations where the certificate or certificate chain did
not validate successfully against the available certificate authority not validate successfully against the available certificate authority
certificates. certificates.
@ -339,7 +341,7 @@ included below.
hook Notice::policy(n: Notice::Info) hook Notice::policy(n: Notice::Info)
{ {
if ( n?$conn && n$conn?$http && n$conn$http?$host ) if ( n?$conn && n$conn?$http && n$conn$http?$host )
n$email_body_sections[|email_body_sections|] = fmt("HTTP host header: %s", n$conn$http$host); n$email_body_sections[|n$email_body_sections|] = fmt("HTTP host header: %s", n$conn$http$host);
} }
@ -348,7 +350,7 @@ Cluster Considerations
As a user/developer of Bro, the main cluster concern with the notice framework As a user/developer of Bro, the main cluster concern with the notice framework
is understanding what runs where. When a notice is generated on a worker, the is understanding what runs where. When a notice is generated on a worker, the
worker checks to see if the notice shoudl be suppressed based on information worker checks to see if the notice should be suppressed based on information
locally maintained in the worker process. If it's not being locally maintained in the worker process. If it's not being
suppressed, the worker forwards the notice directly to the manager and does no more suppressed, the worker forwards the notice directly to the manager and does no more
local processing. The manager then runs the :bro:see:`Notice::policy` hook and local processing. The manager then runs the :bro:see:`Notice::policy` hook and

View file

@ -1,7 +1,7 @@
========== ===================
Signatures Signature Framework
========== ===================
.. rst-class:: opening .. rst-class:: opening
@ -46,7 +46,7 @@ signature's event statement (``Found root!``), and data is the last
piece of payload which triggered the pattern match. piece of payload which triggered the pattern match.
To turn such :bro:id:`signature_match` events into actual alarms, you can To turn such :bro:id:`signature_match` events into actual alarms, you can
load Bro's :doc:`/scripts/base/frameworks/signatures/main` script. load Bro's :doc:`/scripts/base/frameworks/signatures/main.bro` script.
This script contains a default event handler that raises This script contains a default event handler that raises
:bro:enum:`Signatures::Sensitive_Signature` :doc:`Notices <notice>` :bro:enum:`Signatures::Sensitive_Signature` :doc:`Notices <notice>`
(as well as others; see the beginning of the script). (as well as others; see the beginning of the script).
@ -64,8 +64,8 @@ expect that signature file in the same directory as the Bro script. The
default extension of the file name is ``.sig``, and Bro appends that default extension of the file name is ``.sig``, and Bro appends that
automatically when necessary. automatically when necessary.
Signature language Signature Language for Network Traffic
================== ======================================
Let's look at the format of a signature more closely. Each individual Let's look at the format of a signature more closely. Each individual
signature has the format ``signature <id> { <attributes> }``. ``<id>`` signature has the format ``signature <id> { <attributes> }``. ``<id>``
@ -286,6 +286,44 @@ two actions defined:
connection (``"http"``, ``"ftp"``, etc.). This is used by Bro's connection (``"http"``, ``"ftp"``, etc.). This is used by Bro's
dynamic protocol detection to activate analyzers on the fly. dynamic protocol detection to activate analyzers on the fly.
Signature Language for File Content
===================================
The signature framework can also be used to identify MIME types of files
irrespective of the network protocol/connection over which the file is
transferred. A special type of signature can be written for this
purpose and will be used automatically by the :doc:`Files Framework
<file-analysis>` or by Bro scripts that use the :bro:see:`file_magic`
built-in function.
Conditions
----------
File signatures use a single type of content condition in the form of a
regular expression:
``file-magic /<regular expression>/``
This is analogous to the ``payload`` content condition for the network
traffic signature language described above. The difference is that
``payload`` signatures are applied to payloads of network connections,
but ``file-magic`` can be applied to any arbitrary data; it does not
have to be tied to a network protocol/connection.
Actions
-------
Upon matching a chunk of data, file signatures use the following action
to get information about that data's MIME type:
``file-mime <string> [, <integer>]``
The arguments include the MIME type string associated with the file
magic regular expression and an optional "strength" as a signed integer.
Since multiple file magic signatures may match against a given chunk of
data, the strength value may be used to help choose a "winner". Higher
values are considered stronger.
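Putting both pieces together, a complete file signature might look like the
following sketch (the pattern, MIME type string, and strength value here are
illustrative only)::

    signature file-magic-example-png {
        file-magic /^\x89PNG\x0d\x0a\x1a\x0a/
        file-mime "image/png", 70
    }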
Things to keep in mind when writing signatures Things to keep in mind when writing signatures
============================================== ==============================================

View file

@ -0,0 +1,12 @@
event bro_init()
{
local filter: Log::Filter =
[
$name="sqlite",
$path="/var/db/conn",
$config=table(["tablename"] = "conn"),
$writer=Log::WRITER_SQLITE
];
Log::add_filter(Conn::LOG, filter);
}

View file

@ -0,0 +1,40 @@
@load frameworks/files/hash-all-files
type Val: record {
hash: string;
description: string;
};
event line(description: Input::EventDescription, tpe: Input::Event, r: Val)
{
print fmt("malware-hit with hash %s, description %s", r$hash, r$description);
}
global malware_source = "/var/db/malware";
event file_hash(f: fa_file, kind: string, hash: string)
{
# check all sha1 hashes
if ( kind=="sha1" )
{
Input::add_event(
[
$source=malware_source,
$name=hash,
$fields=Val,
$ev=line,
$want_record=T,
$config=table(
["query"] = fmt("select * from malware_hashes where hash='%s';", hash)
),
$reader=Input::READER_SQLITE
]);
}
}
event Input::end_of_data(name: string, source: string)
{
if ( source == malware_source )
Input::remove(name);
}

View file

@ -0,0 +1,35 @@
type Idx: record {
host: addr;
};
type Val: record {
users: set[string];
};
global hostslist: table[addr] of Val = table();
event bro_init()
{
Input::add_table([$source="/var/db/hosts",
$name="hosts",
$idx=Idx,
$val=Val,
$destination=hostslist,
$reader=Input::READER_SQLITE,
$config=table(["query"] = "select * from machines_to_users;")
]);
Input::remove("hosts");
}
event Input::end_of_data(name: string, source: string)
{
if ( name != "hosts" )
return;
# now all data is in the table
print "Hosts list has been successfully imported";
# List the users of one host.
print hostslist[192.168.17.1]$users;
}

View file

@ -0,0 +1,36 @@
@load base/frameworks/sumstats
event connection_established(c: connection)
{
# Make an observation!
# This observation is global so the key is empty.
# Each established connection counts as one so the observation is always 1.
SumStats::observe("conn established",
SumStats::Key(),
SumStats::Observation($num=1));
}
event bro_init()
{
# Create the reducer.
# The reducer attaches to the "conn established" observation stream
# and uses the summing calculation on the observations.
local r1 = SumStats::Reducer($stream="conn established",
$apply=set(SumStats::SUM));
# Create the final sumstat.
# We give it an arbitrary name and make it collect data every minute.
# The reducer is then attached and a $epoch_result callback is given
# to finally do something with the data collected.
SumStats::create([$name = "counting connections",
$epoch = 1min,
$reducers = set(r1),
$epoch_result(ts: time, key: SumStats::Key, result: SumStats::Result) =
{
# This is the body of the callback that is called when a single
# result has been collected. We are just printing the total number
# of connections that were seen. The $sum field is provided as a
# double type value so we need to use %f as the format specifier.
print fmt("Number of connections established: %.0f", result["conn established"]$sum);
}]);
}

View file

@ -0,0 +1,45 @@
@load base/frameworks/sumstats
# We use the connection_attempt event to limit our observations to those
# which were attempted and not successful.
event connection_attempt(c: connection)
{
# Make an observation!
# This observation is about the host attempting the connection.
# Each connection attempt counts as one so the observation is always 1.
SumStats::observe("conn attempted",
SumStats::Key($host=c$id$orig_h),
SumStats::Observation($num=1));
}
event bro_init()
{
# Create the reducer.
# The reducer attaches to the "conn attempted" observation stream
# and uses the summing calculation on the observations. Keep
# in mind that there will be one result per key (connection originator).
local r1 = SumStats::Reducer($stream="conn attempted",
$apply=set(SumStats::SUM));
# Create the final sumstat.
# This is slightly different from the last example since we're providing
# a callback to calculate a value to check against the threshold with
# $threshold_val. The actual threshold itself is provided with $threshold.
# Another callback is provided for when a key crosses the threshold.
SumStats::create([$name = "finding scanners",
$epoch = 5min,
$reducers = set(r1),
# Provide a threshold.
$threshold = 5.0,
# Provide a callback to calculate a value from the result
# to check against the threshold field.
$threshold_val(key: SumStats::Key, result: SumStats::Result) =
{
return result["conn attempted"]$sum;
},
# Provide a callback for when a key crosses the threshold.
$threshold_crossed(key: SumStats::Key, result: SumStats::Result) =
{
print fmt("%s attempted %.0f or more connections", key$host, result["conn attempted"]$sum);
}]);
}

105
doc/frameworks/sumstats.rst Normal file
View file

@ -0,0 +1,105 @@
.. _sumstats-framework:
==================
Summary Statistics
==================
.. rst-class:: opening
Measuring aspects of network traffic is an extremely common task in Bro.
Bro provides data structures which make this very easy in simple
cases, such as processing a size-limited trace file. In
real-world deployments, though, difficulties arise from
clusterization (many processes sniffing traffic) and unbounded data sets
(traffic never stops). The Summary Statistics (otherwise referred to as
SumStats) framework aims to define a mechanism for consuming unbounded
data sets and making them measurable in practice on large clustered and
non-clustered Bro deployments.
.. contents::
Overview
========
The SumStats processing flow is broken into three pieces: observations, where
some aspect of an event is observed and fed into the SumStats framework;
reducers, where observations are collected and measured, typically by taking
some sort of summary statistic measurement like average or variance (among
others); and sumstats, where reducers are given an epoch (time interval) over
which their measurements are performed, along with callbacks for monitoring
thresholds or viewing the collected and measured data.
Terminology
===========
Observation
A single point of data. Observations have a few components of their
own: they are part of an arbitrarily named observation stream, they
have a key identifying what the observation is about, and they carry
the observed value itself.
Reducer
Calculations are applied to an observation stream here to reduce the
full unbounded set of observations down to a smaller representation.
Results are collected within each reducer per-key so care must be
taken to keep the total number of keys tracked down to a reasonable
level.
Sumstat
The final definition of a Sumstat where one or more reducers is
collected over an interval, also known as an epoch. Thresholding can
be applied here along with a callback in the event that a threshold is
crossed. Additionally, a callback can be provided to access each
result (per-key) at the end of each epoch.
Examples
========
These examples may seem very simple to an experienced Bro script developer and
they're intended to look that way. Keep in mind that these scripts will work
on small single process Bro instances as well as large many-worker clusters.
The complications of dealing with flow-based load balancing can be ignored
by developers writing scripts that use SumStats, thanks to its built-in cluster
transparency.
Printing the number of connections
----------------------------------
Sumstats provides a simple way of approaching the problem of trying to count
the number of connections over a given time interval. Here is a script with
inline documentation that does this with the Sumstats framework:
.. btest-include:: ${DOC_ROOT}/frameworks/sumstats-countconns.bro
When run on a sample PCAP file from the Bro test suite, the following output
is created:
.. btest:: sumstats-countconns
@TEST-EXEC: btest-rst-cmd bro -r ${TRACES}/workshop_2011_browse.trace ${DOC_ROOT}/frameworks/sumstats-countconns.bro
Toy scan detection
------------------
Taking the previous example even further, we can implement a simple detection
to demonstrate the thresholding functionality. This example is a toy to
demonstrate how thresholding works in Sumstats and is not meant to be a
real-world functional example; that is left to the
:doc:`/scripts/policy/misc/scan.bro` script that is included with Bro.
.. btest-include:: ${DOC_ROOT}/frameworks/sumstats-toy-scan.bro
Let's see if there are any hosts that crossed the threshold in a PCAP file
containing a host running nmap:
.. btest:: sumstats-toy-scan
@TEST-EXEC: btest-rst-cmd bro -r ${TRACES}/nmap-vsn.trace ${DOC_ROOT}/frameworks/sumstats-toy-scan.bro
It seems the host running nmap was detected!

View file

@ -0,0 +1,24 @@
global mime_to_ext: table[string] of string = {
["application/x-dosexec"] = "exe",
["text/plain"] = "txt",
["image/jpeg"] = "jpg",
["image/png"] = "png",
["text/html"] = "html",
};
event file_new(f: fa_file)
{
if ( f$source != "HTTP" )
return;
if ( ! f?$mime_type )
return;
if ( f$mime_type !in mime_to_ext )
return;
local fname = fmt("%s-%s.%s", f$source, f$id, mime_to_ext[f$mime_type]);
print fmt("Extracting file %s", fname);
Files::add_analyzer(f, Files::ANALYZER_EXTRACT, [$extract_filename=fname]);
}

View file

@ -0,0 +1,5 @@
event http_reply(c: connection, version: string, code: count, reason: string)
{
if ( /^[hH][tT][tT][pP]:/ in c$http$uri && c$http$status_code == 200 )
print fmt("A local server is acting as an open proxy: %s", c$id$resp_h);
}

View file

@ -0,0 +1,26 @@
module HTTP;
export {
global success_status_codes: set[count] = {
200,
201,
202,
203,
204,
205,
206,
207,
208,
226,
304
};
}
event http_reply(c: connection, version: string, code: count, reason: string)
{
if ( /^[hH][tT][tT][pP]:/ in c$http$uri &&
c$http$status_code in HTTP::success_status_codes )
print fmt("A local server is acting as an open proxy: %s", c$id$resp_h);
}

View file

@ -0,0 +1,31 @@
@load base/utils/site
redef Site::local_nets += { 192.168.0.0/16 };
module HTTP;
export {
global success_status_codes: set[count] = {
200,
201,
202,
203,
204,
205,
206,
207,
208,
226,
304
};
}
event http_reply(c: connection, version: string, code: count, reason: string)
{
if ( Site::is_local_addr(c$id$resp_h) &&
/^[hH][tT][tT][pP]:/ in c$http$uri &&
c$http$status_code in HTTP::success_status_codes )
print fmt("A local server is acting as an open proxy: %s", c$id$resp_h);
}

View file

@ -0,0 +1,40 @@
@load base/utils/site
@load base/frameworks/notice
redef Site::local_nets += { 192.168.0.0/16 };
module HTTP;
export {
redef enum Notice::Type += {
Open_Proxy
};
global success_status_codes: set[count] = {
200,
201,
202,
203,
204,
205,
206,
207,
208,
226,
304
};
}
event http_reply(c: connection, version: string, code: count, reason: string)
{
if ( Site::is_local_addr(c$id$resp_h) &&
/^[hH][tT][tT][pP]:/ in c$http$uri &&
c$http$status_code in HTTP::success_status_codes )
NOTICE([$note=HTTP::Open_Proxy,
$msg=fmt("A local server is acting as an open proxy: %s",
c$id$resp_h),
$conn=c,
$identifier=cat(c$id$resp_h),
$suppress_for=1day]);
}

162
doc/httpmonitor/index.rst Normal file
View file

@ -0,0 +1,162 @@
.. _http-monitor:
================================
Monitoring HTTP Traffic with Bro
================================
Bro can be used to log the entire HTTP traffic from your network to the
http.log file. This file can then be used for analysis and auditing
purposes.
In the sections below we briefly explain the structure of the http.log
file, then we show you how to perform basic HTTP traffic monitoring and
analysis tasks with Bro. Some of these ideas and techniques can later be
applied to monitor different protocols in a similar way.
----------------------------
Introduction to the HTTP log
----------------------------
The http.log file contains a summary of all HTTP requests and responses
sent over a Bro-monitored network. Here are the first few columns of
``http.log``::
# ts uid orig_h orig_p resp_h resp_p
1311627961.8 HSH4uV8KVJg 192.168.1.100 52303 192.150.187.43 80
Every single line in this log starts with a timestamp, a unique
connection identifier (UID), and a connection 4-tuple (originator
host/port and responder host/port). The UID can be used to identify all
logged activity (possibly across multiple log files) associated with a
given connection 4-tuple over its lifetime.
The remaining columns detail the activity that's occurring. For
example, the columns on the line below (shortened for brevity) show a
request to the root of the Bro website::
# method host uri referrer user_agent
GET bro.org / - <...>Chrome/12.0.742.122<...>
Network administrators and security engineers, for instance, can use the
information in this log to understand the HTTP activity on the network
and troubleshoot network problems or search for anomalous activities. We must
stress that there is no single right way to perform an analysis. It will
depend on the expertise of the person performing the analysis and the
specific details of the task.
For more information about how to handle the HTTP protocol in Bro,
including a complete list of the fields available in http.log, go to
Bro's :doc:`HTTP script reference
</scripts/base/protocols/http/main.bro>`.
------------------------
Detecting a Proxy Server
------------------------
A proxy server is a device on your network configured to request a
service on behalf of a third system; one of the most common examples is
a Web proxy server. A client without Internet access connects to the
proxy and requests a web page; the proxy forwards the request to the web
server, receives the response, and passes it back to the original
client.
Proxies were conceived to help manage a network and provide better
encapsulation. Proxies by themselves are not a security threat, but a
misconfigured or unauthorized proxy can allow others, either inside or
outside the network, to access any web site and even conduct malicious
activities anonymously using the network's resources.
What Proxy Server traffic looks like
-------------------------------------
In general, when a client starts talking with a proxy server, the
traffic consists of two parts: (i) a GET request, and (ii) an HTTP
reply::
Request: GET http://www.bro.org/ HTTP/1.1
Reply: HTTP/1.0 200 OK
This will differ from traffic between a client and a normal Web server,
because GET requests sent to a normal server should not include "http:" in
the URI string. We can use this to identify a proxy server.
We can write a basic script in Bro to handle the http_reply event and
detect a reply for a ``GET http://`` request.
.. btest-include:: ${DOC_ROOT}/httpmonitor/http_proxy_01.bro
.. btest:: http_proxy_01
@TEST-EXEC: btest-rst-cmd bro -r ${TRACES}/http/proxy.pcap ${DOC_ROOT}/httpmonitor/http_proxy_01.bro
Basically, the script is checking for a "200 OK" status code on a reply
for a request that includes "http:" (case insensitive). In reality, the
HTTP protocol defines several success status codes other than 200, so we
will extend our basic script to also consider the additional codes.
.. btest-include:: ${DOC_ROOT}/httpmonitor/http_proxy_02.bro
.. btest:: http_proxy_02
@TEST-EXEC: btest-rst-cmd bro -r ${TRACES}/http/proxy.pcap ${DOC_ROOT}/httpmonitor/http_proxy_02.bro
Next, we will make sure that the responding proxy is part of our local
network.
.. btest-include:: ${DOC_ROOT}/httpmonitor/http_proxy_03.bro
.. btest:: http_proxy_03
@TEST-EXEC: btest-rst-cmd bro -r ${TRACES}/http/proxy.pcap ${DOC_ROOT}/httpmonitor/http_proxy_03.bro
.. note::
The redefinition of :bro:see:`Site::local_nets` is only done inside
this script to make it a self-contained example. It's typically
redefined somewhere else.
Finally, our goal should be to generate an alert when a proxy has been
detected instead of printing a message on the console output. For that,
we will tag the traffic accordingly and define a new ``Open_Proxy``
``Notice`` type to alert on all tagged communications. Once a
notice has been fired, we will further suppress it for one day.
Below is the complete script.
.. btest-include:: ${DOC_ROOT}/httpmonitor/http_proxy_04.bro
.. btest:: http_proxy_04
@TEST-EXEC: btest-rst-cmd bro -r ${TRACES}/http/proxy.pcap ${DOC_ROOT}/httpmonitor/http_proxy_04.bro
@TEST-EXEC: btest-rst-include notice.log
Note that this script only logs the presence of the proxy to
``notice.log``, but if an additional email is desired (and email
functionality is enabled), then that's done simply by redefining
:bro:see:`Notice::emailed_types` to add the ``Open_Proxy`` notice type
to it.
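Assuming email delivery is otherwise configured for the Notice framework, that
redefinition could be as simple as:

.. code:: bro

    redef Notice::emailed_types += { HTTP::Open_Proxy };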
----------------
Inspecting Files
----------------
Files are often transmitted over regular HTTP conversations between a
client and a server. Most of the time these files are harmless, just
images and other multimedia content, but there are also types of
files, especially executable files, that can damage your system. We can
instruct Bro to create a copy of all files of certain types that it sees
using the :ref:`File Analysis Framework <file-analysis-framework>`
(introduced with Bro 2.2):
.. btest-include:: ${DOC_ROOT}/httpmonitor/file_extraction.bro
.. btest:: file_extraction
@TEST-EXEC: btest-rst-cmd -n 5 bro -r ${TRACES}/http/bro.org.pcap ${DOC_ROOT}/httpmonitor/file_extraction.bro
Here, the ``mime_to_ext`` table serves two purposes: it defines which
MIME types to extract and also the file suffix of the extracted files.
Extracted files are written to a new ``extract_files`` subdirectory.
Also note that the first conditional in the :bro:see:`file_new` event
handler can be removed to make this behavior generic to other protocols
besides HTTP.
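A sketch of that generic variant, reusing the ``mime_to_ext`` table defined
above, would be:

.. code:: bro

    event file_new(f: fa_file)
        {
        # No protocol check: extract matching files regardless of their source.
        if ( ! f?$mime_type || f$mime_type !in mime_to_ext )
            return;

        local fname = fmt("%s-%s.%s", f$source, f$id, mime_to_ext[f$mime_type]);
        Files::add_analyzer(f, Files::ANALYZER_EXTRACT, [$extract_filename=fname]);
        }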

View file

@ -1,91 +1,52 @@
.. Bro documentation master file .. Bro documentation master file
================= ==========
Bro Documentation Bro Manual
==========
Introduction Section
====================
.. toctree::
:maxdepth: 2
intro/index.rst
cluster/index.rst
install/index.rst
quickstart/index.rst
configuration/index.rst
..
.. _using-bro:
Using Bro Section
================= =================
Guides .. toctree::
------ :maxdepth: 2
logs/index.rst
httpmonitor/index.rst
broids/index.rst
mimestats/index.rst
..
Reference Section
=================
.. toctree:: .. toctree::
:maxdepth: 1 :maxdepth: 2
INSTALL scripting/index.rst
upgrade frameworks/index.rst
quickstart script-reference/index.rst
faq components/index.rst
reporting-problems
Frameworks ..
----------
.. toctree::
:maxdepth: 1
notice
logging
input
file-analysis
cluster
signatures
How-Tos
-------
.. toctree::
:maxdepth: 1
geoip
Script Reference
----------------
.. toctree::
:maxdepth: 1
scripts/packages
scripts/index
scripts/builtins
scripts/proto-analyzers
Other Bro Components
--------------------
The following are snapshots of documentation for components that come
with this version of Bro (|version|). Since they can also be used
independently, see the `download page
<http://bro.org/download/index.html>`_ for documentation of any
current, independent component releases.
.. toctree::
:maxdepth: 1
BinPAC - A protocol parser generator <components/binpac/README>
Broccoli - The Bro Client Communication Library (README) <components/broccoli/README>
Broccoli - User Manual <components/broccoli/broccoli-manual>
Broccoli Python Bindings <components/broccoli-python/README>
Broccoli Ruby Bindings <components/broccoli-ruby/README>
BroControl - Interactive Bro management shell <components/broctl/README>
Bro-Aux - Small auxiliary tools for Bro <components/bro-aux/README>
BTest - A unit testing framework <components/btest/README>
Capstats - Command-line packet statistic tool <components/capstats/README>
PySubnetTree - Python module for CIDR lookups<components/pysubnettree/README>
trace-summary - Script for generating break-downs of network traffic <components/trace-summary/README>
The `Broccoli API Reference <broccoli-api/index.html>`_ may also be of
interest.
Other Indices and References
----------------------------
* :ref:`General Index <genindex>` * :ref:`General Index <genindex>`
* `Notice Index <bro-noticeindex.html>`_
* :ref:`search` * :ref:`search`
Internal References
-------------------
.. toctree::
:maxdepth: 1
scripts/internal

View file

@ -0,0 +1 @@
../../aux/binpac/CHANGES

View file

@ -0,0 +1 @@
../../aux/bro-aux/CHANGES

1
doc/install/CHANGES-bro.txt Symbolic link
View file

@ -0,0 +1 @@
../../CHANGES

View file

@ -0,0 +1 @@
../../aux/broccoli/bindings/broccoli-python/CHANGES

View file

@ -0,0 +1 @@
../../aux/broccoli/bindings/broccoli-ruby/CHANGES

View file

@ -0,0 +1 @@
../../aux/broccoli/CHANGES

View file

@ -0,0 +1 @@
../../aux/broctl/CHANGES

View file

@ -0,0 +1 @@
../../aux/btest/CHANGES

View file

@ -0,0 +1 @@
../../aux/broctl/aux/capstats/CHANGES

View file

@ -0,0 +1 @@
../../aux/broctl/aux/pysubnettree/CHANGES

View file

@ -0,0 +1 @@
../../aux/broctl/aux/trace-summary/CHANGES

1
doc/install/NEWS.rst Symbolic link
View file

@ -0,0 +1 @@
../../NEWS

75
doc/install/changes.rst Normal file
View file

@ -0,0 +1,75 @@
========================
Detailed Version History
========================
.. contents::
---
Bro
---
.. literalinclude:: CHANGES-bro.txt
----------
BroControl
----------
.. literalinclude:: CHANGES-broctl.txt
--------
Broccoli
--------
.. literalinclude:: CHANGES-broccoli.txt
---------------
Broccoli Python
---------------
.. literalinclude:: CHANGES-broccoli-python.txt
-------------
Broccoli Ruby
-------------
.. literalinclude:: CHANGES-broccoli-ruby.txt
--------
Capstats
--------
.. literalinclude:: CHANGES-capstats.txt
-------------
Trace-Summary
-------------
.. literalinclude:: CHANGES-trace-summary.txt
------
BinPAC
------
.. literalinclude:: CHANGES-binpac.txt
-------
Bro-Aux
-------
.. literalinclude:: CHANGES-bro-aux.txt
-----
BTest
-----
.. literalinclude:: CHANGES-btest.txt
------------
PySubnetTree
------------
.. literalinclude:: CHANGES-pysubnettree.txt

View file

@ -0,0 +1,47 @@
.. _upgrade-guidelines:
==============
How to Upgrade
==============
If you're doing an upgrade install (rather than a fresh install),
there are two suggested approaches: either install Bro using the same
installation prefix directory as before, or pick a new prefix and copy
local customizations over. Regardless of which approach you choose,
if you are using BroControl, then after upgrading Bro you will need to
run "broctl check" (to verify that your new configuration is OK)
and "broctl install" to complete the upgrade process.
In the following we summarize general guidelines for upgrading; see
the :ref:`release-notes` for version-specific information.
Reusing Previous Install Prefix
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
If you choose to configure and install Bro with the same prefix
directory as before, local customization and configuration to files in
``$prefix/share/bro/site`` and ``$prefix/etc`` won't be overwritten
(``$prefix`` indicating the root of where Bro was installed). Also, logs
generated at run-time won't be touched by the upgrade. Backing up local
changes before upgrading is still recommended.
After upgrading, remember to check ``$prefix/share/bro/site`` and
``$prefix/etc`` for ``.example`` files, which indicate that the
distribution's version of the file differs from the local one, and therefore,
may include local changes. Review the differences and make adjustments
as necessary. Use the new version for differences that aren't a result of
a local change.
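For example, one way to review such a difference is a plain ``diff`` (the paths
below assume the default installation prefix and are only illustrative):

.. console::

   diff -u /usr/local/bro/etc/broctl.cfg /usr/local/bro/etc/broctl.cfg.example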
Using a New Install Prefix
~~~~~~~~~~~~~~~~~~~~~~~~~~
To install the newer version in a different prefix directory than before,
copy local customization and configuration files from ``$prefix/share/bro/site``
and ``$prefix/etc`` to the new location (``$prefix`` indicating the root of
where Bro was originally installed). Review the files for differences
before copying and make adjustments as necessary (use the new version for
differences that aren't a result of a local change). Of particular note,
the copied version of ``$prefix/etc/broctl.cfg`` is likely to need changes
to the ``SpoolDir`` and ``LogDir`` settings.

12
doc/install/index.rst Normal file
View file

@ -0,0 +1,12 @@
.. _installation:
============
Installation
============
.. toctree::
:maxdepth: 2
install
upgrade

211
doc/install/install.rst Normal file
View file

@ -0,0 +1,211 @@
.. _CMake: http://www.cmake.org
.. _SWIG: http://www.swig.org
.. _Xcode: https://developer.apple.com/xcode/
.. _MacPorts: http://www.macports.org
.. _Fink: http://www.finkproject.org
.. _Homebrew: http://brew.sh
.. _bro downloads page: http://bro.org/download/index.html
.. _installing-bro:
==============
Installing Bro
==============
.. contents::
Prerequisites
=============
Before installing Bro, you'll need to ensure that some dependencies
are in place.
Required Dependencies
---------------------
Bro requires the following libraries and tools to be installed
before you begin:
* Libpcap (http://www.tcpdump.org)
* OpenSSL libraries (http://www.openssl.org)
* BIND8 library
* Libz
* Bash (for BroControl)
* Python (for BroControl)
To build Bro from source, the following additional dependencies are required:
* CMake 2.6.3 or greater (http://www.cmake.org)
* Make
* C/C++ compiler
* SWIG (http://www.swig.org)
* Bison (GNU Parser Generator)
* Flex (Fast Lexical Analyzer)
* Libpcap headers (http://www.tcpdump.org)
* OpenSSL headers (http://www.openssl.org)
* zlib headers
* Perl
To install the required dependencies, you can use (when done, make sure
that ``bash`` and ``python`` are in your ``PATH``):
* RPM/RedHat-based Linux:
.. console::
sudo yum install cmake make gcc gcc-c++ flex bison libpcap-devel openssl-devel python-devel swig zlib-devel
* DEB/Debian-based Linux:
.. console::
sudo apt-get install cmake make gcc g++ flex bison libpcap-dev libssl-dev python-dev swig zlib1g-dev
* FreeBSD:
Most required dependencies should come with a minimal FreeBSD install
except for the following.
.. console::
sudo pkg_add -r bash cmake swig bison python perl
* Mac OS X:
Compiling source code on Macs requires first downloading Xcode_,
then going through its "Preferences..." -> "Downloads" menus to
install the "Command Line Tools" component.
OS X comes with all required dependencies except for CMake_ and SWIG_.
Distributions of these dependencies can likely be obtained from your
preferred Mac OS X package management system (e.g. MacPorts_, Fink_,
or Homebrew_). Specifically for MacPorts, the ``cmake``, ``swig``,
and ``swig-python`` packages provide the required dependencies.
Optional Dependencies
---------------------
Bro can make use of some optional libraries and tools if they are found at
build time:
* LibGeoIP (for geolocating IP addresses)
* sendmail (enables Bro and BroControl to send mail)
* gawk (enables all features of bro-cut)
* curl (used by a Bro script that implements active HTTP)
* gperftools (tcmalloc is used to improve memory and CPU usage)
* ipsumdump (for trace-summary; http://www.cs.ucla.edu/~kohler/ipsumdump)
* Ruby executable, library, and headers (for Broccoli Ruby bindings)
LibGeoIP is probably the most interesting and can be installed
on most platforms by following the instructions for :ref:`installing
libGeoIP and the GeoIP database
<geolocation>`.
Installing Bro
==============
Bro can be downloaded in either pre-built binary package or source
code forms.
Using Pre-Built Binary Release Packages
=======================================
See the `bro downloads page`_ for currently supported/targeted
platforms for binary releases.
* RPM
.. console::
sudo yum localinstall Bro-*.rpm
* DEB
.. console::
sudo gdebi Bro-*.deb
* MacOS Disk Image with Installer
Just open the ``Bro-*.dmg`` and then run the ``.pkg`` installer.
Everything installed by the package will go into ``/opt/bro``.
The primary install prefix for binary packages is ``/opt/bro``.
Non-MacOS packages that include BroControl also put variable/runtime
data (e.g. Bro logs) in ``/var/opt/bro``.
Installing from Source
==========================
Bro releases are bundled into source packages for convenience and are
available on the `bro downloads page`_. Alternatively, the latest
Bro development version can be obtained through git repositories
hosted at ``git.bro.org``. See our `git development documentation
<http://bro.org/development/howtos/process.html>`_ for comprehensive
information on Bro's use of git revision control, but the short story
for downloading the full source code experience for Bro via git is:
.. console::
git clone --recursive git://git.bro.org/bro
.. note:: If you choose to clone the ``bro`` repository
non-recursively for a "minimal Bro experience", be aware that
compiling it depends on several of the other submodules as well.
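Should you need them later, the submodules can be fetched afterwards with
standard git commands, for example:

.. console::

   git submodule update --init --recursive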
The typical way to build and install from source is (for more options,
run ``./configure --help``):
.. console::
./configure
make
make install
The default installation path is ``/usr/local/bro``, which would typically
require root privileges when doing the ``make install``. A different
installation path can be chosen by specifying the ``--prefix`` option.
Note that ``/usr`` and ``/opt/bro`` are the
standard prefixes for binary Bro packages to be installed, so those are
typically not good choices unless you are creating such a package.
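For example (the prefix shown here is arbitrary):

.. console::

   ./configure --prefix=$HOME/bro-install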
Depending on the Bro package you downloaded, there may be auxiliary
tools and libraries available in the ``aux/`` directory. Some of them
will be automatically built and installed along with Bro. There are
``--disable-*`` options that can be given to the configure script to
turn off unwanted auxiliary projects that would otherwise be installed
automatically. Finally, use ``make install-aux`` to install some of
the other programs that are in the ``aux/bro-aux`` directory.
OpenBSD users, please see our `FAQ
<http://www.bro.org/documentation/faq.html>`_ if you are having
problems installing Bro.
Finally, if you want to build the Bro documentation (not required, because
all of the documentation for the latest Bro release is available on the
Bro web site), there are instructions in ``doc/README`` in the source
distribution.
Configure the Run-Time Environment
==================================
Just remember that you may need to adjust your ``PATH`` environment variable
according to the platform/shell/package you're using. For example:
Bourne-Shell Syntax:
.. console::
export PATH=/usr/local/bro/bin:$PATH
C-Shell Syntax:
.. console::
setenv PATH /usr/local/bro/bin:$PATH
Or substitute ``/opt/bro/bin`` instead if you installed from a binary package.

View file

@ -0,0 +1,13 @@
.. _release-notes:
=============
Release Notes
=============
.. contents::
.. include:: NEWS.rst

10
doc/install/upgrade.rst Normal file
View file

@ -0,0 +1,10 @@
=============
Upgrading Bro
=============
.. toctree::
guidelines
release-notes
changes

BIN
doc/intro/architecture.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 136 KiB

BIN
doc/intro/bro-eyes.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 45 KiB

BIN
doc/intro/history.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 159 KiB

247
doc/intro/index.rst Normal file
View file

@ -0,0 +1,247 @@
============
Introduction
============
.. contents::
Overview
--------
Bro is a passive, open-source network traffic analyzer. It is
primarily a security monitor that inspects all traffic on a link in
depth for signs of suspicious activity. More generally, however,
Bro supports a wide range of traffic analysis tasks even outside of
the security domain, including performance measurements and helping
with trouble-shooting.
The most immediate benefit that a site gains from deploying Bro is an
extensive set of *log files* that record a network's activity in
high-level terms. These logs include not only a comprehensive record
of every connection seen on the wire, but also application-layer
transcripts such as, e.g., all HTTP sessions with their requested
URIs, key headers, MIME types, and server responses; DNS requests with
replies; SSL certificates; key content of SMTP sessions; and much
more. By default, Bro writes all this information into well-structured
tab-separated log files suitable for post-processing with external
software. Users can, however, also choose from a set of alternative
output formats and backends to interface directly with, e.g., external
databases.
In addition to the logs, Bro comes with built-in functionality for a
range of analysis and detection tasks, including extracting files from
HTTP sessions, detecting malware by interfacing to external
registries, reporting vulnerable versions of software seen on the
network, identifying popular web applications, detecting SSH
brute-forcing, validating SSL certificate chains, and much more.
However, the key to understanding Bro lies in realizing that even
though the system comes with such powerful functionality out of the
box, fundamentally it represents a *platform* for traffic analyses
that's fully customizable and extensible: Bro provides users with a
domain-specific, Turing-complete *scripting language* for expressing
arbitrary analysis tasks. Conceptually, you can think of Bro as a
"domain-specific Python" (or Perl): just like Python, the system comes
with a large set of pre-built functionality (the "standard library"),
yet you are not limited to what the system ships with but can put Bro
to use in novel ways by writing your own code. Indeed, all of Bro's
default analyses, including all the logging, are the result of such
scripts; there's no specific analysis hard-coded into the core of the
system.
Bro runs on commodity hardware and hence provides a low-cost
alternative to expensive proprietary solutions. Despite the price tag,
however, Bro actually goes far beyond the capabilities of other
network monitoring tools, which typically remain limited to a small
set of hard-coded analysis tasks. We emphasize in particular that Bro
is *not* a classic signature-based intrusion detection system (IDS).
While it supports such standard functionality as well, Bro's scripting
language indeed facilitates a much broader spectrum of very different
approaches to finding malicious activity, including semantic misuse
detection, anomaly detection, and behavioral analysis.
A large variety of sites deploy Bro operationally for protecting their
cyberinfrastructure, including many universities, research labs,
supercomputing centers, open-science communities, and major
corporations. Bro specifically targets high-speed, high-volume network
monitoring, and an increasing number of sites are now using the system
to monitor their 10GE networks, with some already moving on to 100GE
links. Bro accommodates such high-performance settings by supporting
scalable load-balancing: large sites typically run "Bro Clusters" in
which a high-speed frontend load-balancer distributes the traffic
across an appropriate number of backend PCs, all running dedicated Bro
instances on their individual traffic slices. A central manager system
coordinates the process, synchronizing state across the backends and
providing the operators with a central management interface for
configuration and access to aggregated logs. Bro's integrated
management framework, BroControl, supports such cluster setups
out-of-the-box.
Features
--------
Bro supports a wide range of analyses through its scripting language.
Yet even without further customization it comes with a powerful set of
features.
- Deployment
* Runs on commodity hardware on standard UNIX-style systems
(including Linux, FreeBSD, and MacOS).
* Fully passive traffic analysis off a network tap or monitoring
port.
* Standard libpcap interface for capturing packets.
* Real-time and offline analysis.
* Cluster-support for large-scale deployments.
* Unified management framework for operating both standalone and
cluster setups.
* Open-source under a BSD license.
- Analysis
* Comprehensive logging of activity for offline analysis and
forensics.
* Port-independent analysis of application-layer protocols.
* Support for many application-layer protocols (including DNS,
FTP, HTTP, IRC, SMTP, SSH, SSL).
* Analysis of file content exchanged over application-layer
protocols, including MD5/SHA1 computation for fingerprinting.
* Comprehensive IPv6 support.
* Tunnel detection and analysis (including Ayiya, Teredo, GTPv1).
Bro decapsulates the tunnels and then proceeds to analyze their
content as if no tunnel was in place.
* Extensive sanity checks during protocol analysis.
* Support for IDS-style pattern matching.
- Scripting Language
* Turing-complete language for expressing arbitrary analysis
tasks.
* Event-based programming model.
* Domain-specific data types such as IP addresses (transparently
handling both IPv4 and IPv6), port numbers, and timers.
* Extensive support for tracking and managing network state over
time.
- Interfacing
* Default output to well-structured ASCII logs.
* Alternative backends for ElasticSearch and DataSeries. Further
database interfaces in preparation.
* Real-time integration of external input into analyses. Live
database input in preparation.
* External C library for exchanging Bro events with external
programs. Comes with Perl, Python, and Ruby bindings.
* Ability to trigger arbitrary external processes from within
the scripting language.
History
-------
.. figure:: history.png
:width: 600
:align: center
:alt: Bro History Timeline
:target: ../_images/history.png
Timeline of Bro's History (click to enlarge).
Bro's history goes back much further than many people realize. `Vern
Paxson <http://www.icir.org/vern>`_ designed and implemented the
initial version almost two decades ago.
Vern began work on the code in 1995 as a researcher at the `Lawrence
Berkeley National Laboratory (LBNL) <http://www.lbl.gov>`_. Berkeley
Lab began operational deployment in 1996, and the USENIX Security
Symposium published the original Bro paper in 1998 (later refined in a
subsequent `journal publication <http://www.icir.org/vern/papers/bro-CN99.pdf>`_).
In 2003, the
`National Science Foundation (NSF) <http://www.nsf.gov>`_ began
supporting research and advanced development on Bro at the
`International Computer Science Institute (ICSI)
<http://www.icsi.berkeley.edu>`_, where Vern now leads the `Networking
and Security group <http://www.icir.org>`_. Over the years, a growing
team of ICSI researchers and students kept adding novel functionality
to Bro, while LBNL continued its support with funding from the
`Department of Energy (DOE) <http://www.doe.gov>`_.
Much of Bro's capabilities originate in academic research projects,
with results often published at top-tier conferences. However, the key
to Bro's success was its ability to bridge the traditional gap between
academia and operations from early on, which provided the research
with crucial grounding to ensure that developed approaches stand up to
the challenges of the real world. Yet, with Bro's operational user
community growing over time, the research-centric development model
eventually became a bottleneck to the system's evolution: research
grants do not tend to support the more mundane parts of software
development and maintenance, even though those prove crucial for the
end-user experience. While Bro's capabilities always went beyond those
of traditional systems, a successful deployment used to require
significant technical expertise, typically with a large upfront
investment in tackling Bro's steep learning curve. In 2010, NSF set
out to address this gap by awarding ICSI a grant dedicated solely to
Bro development out of its SDCI program.
With that support in place, the `National Center for
Supercomputing Applications (NCSA) <http://www.ncsa.illinois.edu>`_
joined the team as a core partner, and the Bro Project began to
completely overhaul many of the user-visible parts of the system for
the 2.0 release. Since that version came out, Bro has experienced
tremendous growth in new deployments across a diverse range of
settings, and the Bro team is now working to build on this success by
further advancing the system's capabilities to address the challenges
of future networks.
Architecture
------------
.. figure:: architecture.png
:width: 400
:align: center
:alt: Bro Architecture
:target: ../_images/architecture.png
Bro's internal architecture.
Architecturally, Bro is layered into two major components. Its *event
engine* (or *core*) reduces the incoming packet stream into a series
of higher-level *events*. These events reflect network activity in
policy-neutral terms, i.e., they describe *what* has been seen, but
not *why*, or whether it is significant. For example, every HTTP
request on the wire turns into a corresponding ``http_request`` event
that carries with it the involved IP addresses and ports, the URI
being requested, and the HTTP version in use. The event however does
not convey any further *interpretation*, e.g., of whether that URI
corresponds to a known malware site.
Such semantics are instead derived by Bro's second main component, the
*script interpreter*, which executes a set of *event handlers* written
in Bro's custom scripting language. These scripts can express a site's
security policy, i.e., what actions to take when the monitor detects
different types of activity. More generally they can derive any
desired properties and statistics from the input traffic. Bro's
language comes with extensive domain-specific types and support
functionality; and, crucially, allows scripts to maintain state
over time, enabling them to track and correlate the evolution of what
they observe across connection and host boundaries. Bro scripts can
generate real-time alerts and also execute arbitrary external programs
on demand, e.g., to trigger an active response to an attack.
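As a tiny, hypothetical illustration of this model (not one of Bro's shipped
scripts), a script-level handler for the ``http_request`` event mentioned above
might look like this:

.. code:: bro

    event http_request(c: connection, method: string, original_URI: string,
                       unescaped_URI: string, version: string)
        {
        # The event engine supplies the facts; the script decides what they mean.
        print fmt("%s requested %s %s from %s",
                  c$id$orig_h, method, original_URI, c$id$resp_h);
        }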

293
doc/logs/index.rst Normal file
View file

@ -0,0 +1,293 @@
.. _bro-logging:
===========
Bro Logging
===========
.. contents::
Once Bro has been deployed in an environment and monitoring live
traffic, it will, in its default configuration, begin to produce
human-readable ASCII logs. Each log file, produced by Bro's
:ref:`framework-logging`, is populated with organized, mostly
connection-oriented data. As the standard log files are simple ASCII
data, working with the data contained in them can be done from a
command line terminal once you have been familiarized with the types
of data that can be found in each file. In the following, we work
through the logs' general structure and then examine some standard ways
of working with them.
----------------------
Working with Log Files
----------------------
Generally, all of Bro's log files are produced by a corresponding
script that defines their individual structure. However, as each log
file flows through the Logging Framework, they share a set of
structural similarities. Without getting into the scripting side of
Bro here, a bird's-eye view of how the log files are produced
is as follows. The script's author defines the kinds of data,
such as the originating IP address or the duration of a connection,
which will make up the fields (i.e., columns) of the log file. The
author then decides what network activity should generate a single log
file entry (i.e., one line). For example, this could be a connection
having been completed or an HTTP ``GET`` request being issued by an
originator. When these behaviors are observed during operation, the
data is passed to the Logging Framework which adds the entry
to the appropriate log file.
As the fields of the log entries can be further customized by the
user, the Logging Framework makes use of a header block to ensure that
it remains self-describing. This header entry can be seen by running
the Unix utility ``head`` and outputting the first lines of the file:
.. btest:: using_bro
@TEST-EXEC: btest-rst-cmd bro -r $TRACES/wikipedia.trace
@TEST-EXEC: btest-rst-include -n 15 conn.log
As you can see, the header consists of lines prefixed by ``#`` and
includes information such as what separators are being used for
various types of data, what an empty field looks like and what an
unset field looks like. In this example, the default TAB separator is
being used as the delimiter between fields (``\x09`` is the tab
character in hex). It also lists the comma as the separator for set
data, the string ``(empty)`` as the indicator for an empty field and
the ``-`` character as the indicator for a field that hasn't been set.
The timestamp for when the file was created is included under
``#open``. The header then goes on to detail the fields being listed
in the file and the data types of those fields, in ``#fields`` and
``#types``, respectively. These two entries are often the two most
significant points of interest as they detail not only the field names
but the data types used. When navigating through the different log
files with tools like ``sed``, ``awk``, or ``grep``, having the field
definitions readily available saves the user some mental legwork. The
field names are also a key resource for using the :ref:`bro-cut
<bro-cut>` utility included with Bro; see below.
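For reference, the header of a default ``conn.log`` begins along these
lines (illustrative only; the ``#open`` timestamp and the exact field
list depend on your Bro version and configuration)::

   #separator \x09
   #set_separator  ,
   #empty_field    (empty)
   #unset_field    -
   #path   conn
   #open   <creation timestamp>
   #fields ts  uid  id.orig_h  id.orig_p  id.resp_h  id.resp_p  proto  service  duration  ...
   #types  time  string  addr  port  addr  port  enum  string  interval  ...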
After the header comes the main content. In this example we see 7
connections with their key properties, such as originator and
responder IP addresses (note how Bro transparently handles both IPv4 and
IPv6), transport-layer ports, application-layer services (the
``service`` field is filled in as Bro determines a specific protocol to
be in use, independent of the connection's ports), payload size, and
more. See :bro:type:`Conn::Info` for a description of all fields.
In addition to ``conn.log``, Bro generates many further logs by
default, including:
``dpd.log``
A summary of protocols encountered on non-standard ports.
``dns.log``
All DNS activity.
``ftp.log``
A log of FTP session-level activity.
``files.log``
Summaries of files transferred over the network. This information
is aggregated from different protocols, including HTTP, FTP, and
SMTP.
``http.log``
A summary of all HTTP requests with their replies.
``known_certs.log``
SSL certificates seen in use.
``smtp.log``
A summary of SMTP activity.
``ssl.log``
A record of SSL sessions, including certificates being used.
``weird.log``
A log of unexpected protocol-level activity. Whenever Bro's
protocol analysis encounters a situation it would not expect
(e.g., an RFC violation), it logs it in this file. Note that in
practice, real-world networks tend to exhibit a large number of
such "crud" that is usually not worth following up on.
As you can see, some log files are specific to a particular protocol,
while others aggregate information across different types of activity.
.. _bro-cut:
Using ``bro-cut``
-----------------
The ``bro-cut`` utility can be used in place of other tools to build
terminal commands that remain flexible and accurate independent of
possible changes to the log file itself. It accomplishes this by parsing
the header in each file and allowing the user to refer to the specific
columnar data available (in contrast to tools like ``awk`` that
require the user to refer to fields by their position).
For example, the following command extracts just the given columns
from a ``conn.log``:
.. btest:: using_bro
@TEST-EXEC: btest-rst-cmd -n 10 "cat conn.log | bro-cut id.orig_h id.orig_p id.resp_h duration"
The corresponding ``awk`` command will look like this:
.. btest:: using_bro
@TEST-EXEC: btest-rst-cmd -n 10 awk \'/^[^#]/ {print \$3, \$4, \$5, \$6, \$9}\' conn.log
While the output is similar, there are advantages to using ``bro-cut``
over ``awk``: although ``awk`` is flexible and powerful, ``bro-cut``
was specifically designed to work with Bro's log files. First, the
``bro-cut`` output includes only the log file entries, while the
``awk`` solution needs to skip the header manually. Second, since
``bro-cut`` uses the field descriptors to identify and extract data,
it remains accurate independent of the format and contents of
the log file. It's not uncommon for a Bro configuration to add extra
fields to various log files as required by the environment. In that
case, the fields in the ``awk`` command would have to be altered to
compensate for the new positions, whereas the ``bro-cut`` command would
not change.
.. note::
The sequence of field names given to ``bro-cut`` determines the
output order, which means you can also use ``bro-cut`` to reorder
fields. That can be helpful when piping into, e.g., ``sort``.
As you may have noticed, the ``bro-cut`` command above receives its
input through the ``cat`` command and the ``|`` operator. Whereas
tools like ``awk`` allow you to indicate the log file as a command-line
option, ``bro-cut`` only takes input through redirection such as
``|`` and ``<``. There are a couple of ways to direct log file data
into ``bro-cut``, each dependent upon the type of log file you're
processing. A caveat of its use, however, is that the 8 lines of
header data must be present.
.. note::
``bro-cut`` provides an option ``-c`` to include a corresponding
format header in the output, which makes it possible to chain multiple
``bro-cut`` instances or perform further post-processing that
evaluates the header information.
In its default setup, Bro will rotate log files on an hourly basis,
moving the current log file into a directory named after the date in
``YYYY-MM-DD`` format and gzip-compressing the file with a file name
that includes the log file type and the time range it covers. When
processing a compressed log file, you simply adjust your command-line
tools to use the complementary ``z*`` versions of commands such as
``cat`` (``zcat``), ``grep`` (``zgrep``), and ``head`` (``zhead``).
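For instance, a rotated connection log from such an archive could be
examined like this (the file name below is hypothetical; the exact
naming depends on your rotation settings)::

   > zcat 2014-06-21/conn.13:00:00-14:00:00.log.gz | bro-cut id.orig_h id.resp_h service | head -5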
Working with Timestamps
-----------------------
``bro-cut`` accepts the flag ``-d`` to convert the epoch time values
in the log files to human-readable format. The following command
includes the human-readable timestamp, the unique identifier, the
HTTP ``Host``, and the HTTP ``URI`` as extracted from the ``http.log``
file:
.. btest:: using_bro
@TEST-EXEC: btest-rst-cmd -n 5 "bro-cut -d ts uid host uri < http.log"
Oftentimes, log files from multiple sources are stored in UTC time to
allow easy correlation. Converting the timestamp from a log file to
UTC can be accomplished with the ``-u`` option:
.. btest:: using_bro
@TEST-EXEC: btest-rst-cmd -n 5 "bro-cut -u ts uid host uri < http.log"
The default time format when using the ``-d`` or ``-u`` flag is the
``strftime`` format string ``%Y-%m-%dT%H:%M:%S%z``, which results in a
string with year, month, and day of month, followed by hour, minutes,
seconds, and the timezone offset. The default format can be altered by
using the ``-D`` and ``-U`` flags together with standard ``strftime``
syntax. For example, to put the day before the month you could use the
format string ``%d-%m-%YT%H:%M:%S%z``:
.. btest:: using_bro
@TEST-EXEC: btest-rst-cmd -n 5 "bro-cut -D %d-%m-%YT%H:%M:%S%z ts uid host uri < http.log"
See ``man strftime`` for more options for the format string.
Using UIDs
----------
While Bro can do signature-based analysis, its primary focus is on
behavioral detection, which alters the practice of log review from
"reactionary review" to a process a little more akin to a hunting
trip. A common progression of review includes correlating a session
across multiple log files. As a connection is processed by Bro, a
unique identifier is assigned to each session. This unique identifier
is generally included in any log file entry associated with that
connection and can be used to cross-reference different log files.
A simple example would be to cross-reference a UID seen in a
``conn.log`` file. Here, we're looking for the connection with the
largest number of bytes from the responder by redirecting the output
of ``cat conn.log`` into ``bro-cut`` to extract the UID and the
``resp_bytes`` field, then sorting that output by ``resp_bytes``.
.. btest:: using_bro
@TEST-EXEC: btest-rst-cmd "cat conn.log | bro-cut uid resp_bytes | sort -nrk2 | head -5"
Taking the UID of the first of the top responses, we can now
cross-reference that with the UIDs in the ``http.log`` file.
.. btest:: using_bro
@TEST-EXEC: btest-rst-cmd "cat http.log | bro-cut uid id.resp_h method status_code host uri | grep VW0XPVINV8a"
As you can see, there are two HTTP ``GET`` requests within the
session that Bro identified and logged. Given that HTTP is a stream
protocol, it can have multiple ``GET``, ``POST``, etc. requests in a
stream, and Bro is able to extract and track that information for you,
giving you an in-depth and structured view into HTTP traffic on your
network.
-----------------------
Common Log Files
-----------------------
As a monitoring tool, Bro records a detailed view of the traffic inspected
and the events generated in a series of relevant log files. These files can
later be reviewed for monitoring, auditing, and troubleshooting purposes.
In this section we present a brief explanation of the most commonly used log
files generated by Bro, including links to descriptions of some of the fields
for each log type.
+-----------------+---------------------------------------+------------------------------+
| Log File | Description | Field Descriptions |
+=================+=======================================+==============================+
| http.log | Shows all HTTP requests and replies | :bro:type:`HTTP::Info` |
+-----------------+---------------------------------------+------------------------------+
| ftp.log | Records FTP activity | :bro:type:`FTP::Info` |
+-----------------+---------------------------------------+------------------------------+
| ssl.log | Records SSL sessions including | :bro:type:`SSL::Info` |
| | certificates used | |
+-----------------+---------------------------------------+------------------------------+
| known_certs.log | Includes SSL certificates used | :bro:type:`Known::CertsInfo` |
+-----------------+---------------------------------------+------------------------------+
| smtp.log | Summarizes SMTP traffic on a network | :bro:type:`SMTP::Info` |
+-----------------+---------------------------------------+------------------------------+
| dns.log | Shows all DNS activity on a network | :bro:type:`DNS::Info` |
+-----------------+---------------------------------------+------------------------------+
| conn.log | Records all connections seen by Bro | :bro:type:`Conn::Info` |
+-----------------+---------------------------------------+------------------------------+
| dpd.log | Shows network activity on | :bro:type:`DPD::Info` |
| | non-standard ports | |
+-----------------+---------------------------------------+------------------------------+
| files.log | Records information about all files | :bro:type:`Files::Info` |
| | transmitted over the network | |
+-----------------+---------------------------------------+------------------------------+
| weird.log | Records unexpected protocol-level | :bro:type:`Weird::Info` |
| | activity | |
+-----------------+---------------------------------------+------------------------------+

doc/mimestats/index.rst

@ -0,0 +1,71 @@
.. _mime-stats:
====================
MIME Type Statistics
====================
Files are constantly transmitted over HTTP on regular networks. These
files belong to a specific category (e.g., executable, text, image)
identified by a `Multipurpose Internet Mail Extension (MIME)
<http://en.wikipedia.org/wiki/MIME>`_ type. Although MIME was originally
developed to identify the types of non-text attachments in email, it is
also used by web browsers to identify the type of files transmitted and
to present them accordingly.
In this tutorial, we will demonstrate how to use the Summary Statistics
(Sumstats) Framework to collect statistical information based on MIME types;
specifically, the total number of occurrences, the size in bytes, and the
number of unique hosts transmitting files over HTTP for each type. For
instructions on extracting and creating a local copy of these files, visit
:ref:`this tutorial <http-monitor>`.
------------------------------------------------
MIME Statistics with Sumstats
------------------------------------------------
When working with the :ref:`Summary Statistics Framework
<sumstats-framework>`, you need to define three different pieces: (i)
observations, where the event is observed and fed into the framework;
(ii) reducers, where observations are collected and measured; and (iii)
sumstats, where the main functionality is implemented.
We start by defining our observation along with a record to store
all statistical values and an observation interval. We are conducting our
observation on the :bro:see:`HTTP::log_http` event and are interested
in the MIME type, the size of the file (``response_body_len``), and the
originator host (``orig_h``). We use the MIME type as our key and create
observations for the other two values.
.. btest-include:: ${DOC_ROOT}/mimestats/mimestats.bro
:lines: 6-29, 54-64
Next, we create the reducers. The first will accumulate file sizes,
and the second will make sure we only store a host ID once. Below is
the partial code from a :bro:see:`bro_init` handler.
.. btest-include:: ${DOC_ROOT}/mimestats/mimestats.bro
:lines: 34-37
In our final step, we create the sumstat itself, specifying the
observation interval. Once that interval expires, we populate the record
(defined above) with all the relevant data and write it to a log.
.. btest-include:: ${DOC_ROOT}/mimestats/mimestats.bro
:lines: 38-51
After putting the three pieces together, we end up with the following
final code for our script.
.. btest-include:: ${DOC_ROOT}/mimestats/mimestats.bro
.. btest:: mimestats
@TEST-EXEC: btest-rst-cmd bro -r ${TRACES}/http/bro.org.pcap ${DOC_ROOT}/mimestats/mimestats.bro
@TEST-EXEC: btest-rst-include mime_metrics.log
.. note::
The redefinition of :bro:see:`Site::local_nets` is only done inside
this script to make it a self-contained example. It's typically
redefined somewhere else.


@ -0,0 +1,64 @@
@load base/utils/site
@load base/frameworks/sumstats
redef Site::local_nets += { 10.0.0.0/8 };
module MimeMetrics;
export {
redef enum Log::ID += { LOG };
type Info: record {
## Timestamp when the log line was finished and written.
ts: time &log;
## Time interval that the log line covers.
ts_delta: interval &log;
## The mime type
mtype: string &log;
## The number of unique local hosts that fetched this mime type
uniq_hosts: count &log;
## The number of hits to the mime type
hits: count &log;
## The total number of bytes received by this mime type
bytes: count &log;
};
## The frequency of logging the stats collected by this script.
const break_interval = 5mins &redef;
}
event bro_init() &priority=3
{
Log::create_stream(MimeMetrics::LOG, [$columns=Info]);
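# The first reducer sums the bytes observed per MIME type; the second
# counts hits and unique originator hosts per MIME type.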
local r1: SumStats::Reducer = [$stream="mime.bytes",
$apply=set(SumStats::SUM)];
local r2: SumStats::Reducer = [$stream="mime.hits",
$apply=set(SumStats::UNIQUE)];
SumStats::create([$name="mime-metrics",
$epoch=break_interval,
$reducers=set(r1, r2),
$epoch_result(ts: time, key: SumStats::Key, result: SumStats::Result) =
{
local l: Info;
l$ts = network_time();
l$ts_delta = break_interval;
l$mtype = key$str;
l$bytes = double_to_count(floor(result["mime.bytes"]$sum));
l$hits = result["mime.hits"]$num;
l$uniq_hosts = result["mime.hits"]$unique;
Log::write(MimeMetrics::LOG, l);
}]);
}
event HTTP::log_http(rec: HTTP::Info)
{
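# Only consider HTTP responses to local originators that carry a MIME type.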
if ( Site::is_local_addr(rec$id$orig_h) && rec?$resp_mime_types )
{
local mime_type = rec$resp_mime_types[0];
SumStats::observe("mime.bytes", [$str=mime_type],
[$num=rec$response_body_len]);
SumStats::observe("mime.hits", [$str=mime_type],
[$str=cat(rec$id$orig_h)]);
}
}


@ -1,39 +1,38 @@
.. _FAQ: http://www.bro.org/documentation/faq.html
.. _quickstart:
================= =================
Quick Start Guide Quick Start Guide
================= =================
.. rst-class:: opening
The short story for getting Bro up and running in a simple configuration
for analysis of either live traffic from a network interface or a packet
capture trace file.
.. contents:: .. contents::
Installation
============
Bro works on most modern, Unix-based systems and requires no custom Bro works on most modern, Unix-based systems and requires no custom
hardware. It can be downloaded in either pre-built binary package or hardware. It can be downloaded in either pre-built binary package or
source code forms. See :doc:`Installing Bro <INSTALL>` for instructions source code forms. See :ref:`installing-bro` for instructions on how to
on how to install Bro. install Bro.
.. note:: Below, ``$PREFIX`` is used to reference the Bro installation In the examples below, ``$PREFIX`` is used to reference the Bro
root directory. installation root directory, which by default is ``/usr/local/bro`` if
you install from source.
Using BroControl Managing Bro with BroControl
================ ============================
BroControl is an interactive shell for easily operating/managing Bro BroControl is an interactive shell for easily operating/managing Bro
installations on a single system or even across multiple systems in a installations on a single system or even across multiple systems in a
traffic-monitoring cluster. traffic-monitoring cluster. This section explains how to use BroControl
to manage a stand-alone Bro installation. For instructions on how to
configure a Bro cluster, see the :doc:`Cluster Configuration
<../configuration/index>` documentation.
A Minimal Starting Configuration A Minimal Starting Configuration
-------------------------------- --------------------------------
These are the basic configuration changes to make for a minimal BroControl installation These are the basic configuration changes to make for a minimal BroControl
that will manage a single Bro instance on the ``localhost``: installation that will manage a single Bro instance on the ``localhost``:
1) In ``$PREFIX/etc/node.cfg``, set the right interface to monitor. 1) In ``$PREFIX/etc/node.cfg``, set the right interface to monitor.
2) In ``$PREFIX/etc/networks.cfg``, comment out the default settings and add 2) In ``$PREFIX/etc/networks.cfg``, comment out the default settings and add
@ -68,9 +67,9 @@ policy and output the results in ``$PREFIX/logs``.
.. note:: The user starting BroControl needs permission to capture .. note:: The user starting BroControl needs permission to capture
network traffic. If you are not root, you may need to grant further network traffic. If you are not root, you may need to grant further
privileges to the account you're using; see the :doc:`FAQ <faq>`. privileges to the account you're using; see the FAQ_. Also, if it
Also, if it looks like Bro is not seeing any traffic, check out looks like Bro is not seeing any traffic, check out the FAQ entry on
the FAQ entry on checksum offloading. checksum offloading.
You can leave it running for now, but to stop this Bro instance you would do: You can leave it running for now, but to stop this Bro instance you would do:
@ -78,7 +77,8 @@ You can leave it running for now, but to stop this Bro instance you would do:
[BroControl] > stop [BroControl] > stop
We also recommend to insert the following entry into `crontab`:: We also recommend to insert the following entry into the crontab of the user
running BroControl::
0-59/5 * * * * $PREFIX/bin/broctl cron 0-59/5 * * * * $PREFIX/bin/broctl cron
@ -115,20 +115,18 @@ columns (shortened for brevity) show a request to the root of Bro website::
Some logs are worth explicit mention: Some logs are worth explicit mention:
``weird.log`` ``conn.log``
Contains unusual/exceptional activity that can indicate Contains an entry for every connection seen on the wire, with
malformed connections, traffic that doesn't conform to a particular basic properties such as time and duration, originator and
protocol, malfunctioning/misconfigured hardware, or even an attacker responder IP addresses, services and ports, payload size, and
attempting to avoid/confuse a sensor. Without context, it's hard to much more. This log provides a comprehensive record of the
judge whether this category of activity is interesting and so that is network's activity.
left up to the user to configure.
``notice.log`` ``notice.log``
Identifies specific activity that Bro recognizes as Identifies specific activity that Bro recognizes as
potentially interesting, odd, or bad. In Bro-speak, such potentially interesting, odd, or bad. In Bro-speak, such
activity is called a "notice". activity is called a "notice".
By default, ``BroControl`` regularly takes all the logs from By default, ``BroControl`` regularly takes all the logs from
``$PREFIX/logs/current`` and archives/compresses them to a directory ``$PREFIX/logs/current`` and archives/compresses them to a directory
named by date, e.g. ``$PREFIX/logs/2011-10-06``. The frequency at named by date, e.g. ``$PREFIX/logs/2011-10-06``. The frequency at
@ -162,7 +160,7 @@ changes we want to make:
attempt looks like it may have been successful, and we want email when attempt looks like it may have been successful, and we want email when
that happens, but only for certain servers. that happens, but only for certain servers.
So we've defined *what* we want to do, but need to know *where* to do it. We've defined *what* we want to do, but need to know *where* to do it.
The answer is to use a script written in the Bro programming language, so The answer is to use a script written in the Bro programming language, so
let's do a quick intro to Bro scripting. let's do a quick intro to Bro scripting.
@ -188,7 +186,7 @@ must explicitly choose if they want to load them.
The main entry point for the default analysis configuration of a standalone The main entry point for the default analysis configuration of a standalone
Bro instance managed by BroControl is the ``$PREFIX/share/bro/site/local.bro`` Bro instance managed by BroControl is the ``$PREFIX/share/bro/site/local.bro``
script. So we'll be adding to that in the following sections, but first script. We'll be adding to that in the following sections, but first
we have to figure out what to add. we have to figure out what to add.
Redefining Script Option Variables Redefining Script Option Variables
@ -204,8 +202,8 @@ A redefineable constant might seem strange, but what that really means is that
the variable's value may not change at run-time, but whose initial value can be the variable's value may not change at run-time, but whose initial value can be
modified via the ``redef`` operator at parse-time. modified via the ``redef`` operator at parse-time.
So let's continue on our path to modify the behavior for the two SSL Let's continue on our path to modify the behavior for the two SSL
and SSH notices. Looking at :doc:`scripts/base/frameworks/notice/main`, and SSH notices. Looking at :doc:`/scripts/base/frameworks/notice/main.bro`,
we see that it advertises: we see that it advertises:
.. code:: bro .. code:: bro
@ -218,7 +216,7 @@ we see that it advertises:
const ignored_types: set[Notice::Type] = {} &redef; const ignored_types: set[Notice::Type] = {} &redef;
} }
That's exactly what we want to do for the SSL notice. So add to ``local.bro``: That's exactly what we want to do for the SSL notice. Add to ``local.bro``:
.. code:: bro .. code:: bro
@ -270,14 +268,11 @@ that only takes the email action for SSH logins to a defined set of servers:
192.168.1.102, 192.168.1.102,
} &redef; } &redef;
redef Notice::policy += { hook Notice::policy(n: Notice::Info)
[$action = Notice::ACTION_EMAIL, {
$pred(n: Notice::Info) = if ( n$note == SSH::SUCCESSFUL_LOGIN && n$id$resp_h in watched_servers )
{ add n$actions[Notice::ACTION_EMAIL];
return n$note == SSH::Login && n$id$resp_h in watched_servers; }
}
]
};
You'll just have to trust the syntax for now, but what we've done is You'll just have to trust the syntax for now, but what we've done is
first declare our own variable to hold a set of watched addresses, first declare our own variable to hold a set of watched addresses,
@ -286,9 +281,9 @@ an email on the condition that the predicate function evaluates to true, which
is whenever the notice type is an SSH login and the responding host stored is whenever the notice type is an SSH login and the responding host stored
inside the ``Info`` record's connection field is in the set of watched servers. inside the ``Info`` record's connection field is in the set of watched servers.
.. note:: record field member access is done with the '$' character .. note:: Record field member access is done with the '$' character
instead of a '.' as might be expected from other languages, in instead of a '.' as might be expected from other languages, in
order to avoid ambiguity with the builtin address type's use of '.' order to avoid ambiguity with the built-in address type's use of '.'
in IPv4 dotted decimal representations. in IPv4 dotted decimal representations.
Remember, to finalize that configuration change perform the ``check``, Remember, to finalize that configuration change perform the ``check``,
@ -302,21 +297,27 @@ tweak the most basic options. Here's some suggestions on what to explore next:
* We only looked at how to change options declared in the notice framework, * We only looked at how to change options declared in the notice framework,
there's many more options to look at in other script packages. there's many more options to look at in other script packages.
* Continue reading with :ref:`Using Bro <using-bro>` chapter which goes
into more depth on working with Bro; then look at
:ref:`writing-scripts` for learning how to start writing your own
scripts.
* Look at the scripts in ``$PREFIX/share/bro/policy`` for further ones * Look at the scripts in ``$PREFIX/share/bro/policy`` for further ones
you may want to load. you may want to load; you can browse their documentation at the
:ref:`overview of script packages <script-packages>`.
* Reading the code of scripts that ship with Bro is also a great way to gain * Reading the code of scripts that ship with Bro is also a great way to gain
understanding of the language and how you can start writing your own custom further understanding of the language and how scripts tend to be
analysis. structured.
* Review the :doc:`FAQ <faq>`. * Review the FAQ_.
* Continue reading below for another mini-tutorial on using Bro as a standalone * Continue reading below for another mini-tutorial on using Bro as a standalone
command-line utility. command-line utility.
Bro, the Command-Line Utility Bro as a Command-Line Utility
============================= =============================
If you prefer not to use BroControl (e.g. don't need its automation and If you prefer not to use BroControl (e.g. don't need its automation
management features), here's how to directly control Bro for your analysis and management features), here's how to directly control Bro for your
activities. analysis activities from the command line for both live traffic and
offline working from traces.
Monitoring Live Traffic Monitoring Live Traffic
----------------------- -----------------------
@ -333,9 +334,9 @@ that's available.
Bro will output log files into the working directory. Bro will output log files into the working directory.
.. note:: The :doc:`FAQ <faq>` entries about .. note:: The FAQ_ entries about
capturing as an unprivileged user and checksum offloading are particularly capturing as an unprivileged user and checksum offloading are
relevant at this point. particularly relevant at this point.
To use the site-specific ``local.bro`` script, just add it to the To use the site-specific ``local.bro`` script, just add it to the
command-line: command-line:
@ -412,7 +413,7 @@ logging) and adds SSL certificate validation.
You might notice that a script you load from the command line uses the You might notice that a script you load from the command line uses the
``@load`` directive in the Bro language to declare dependence on other scripts. ``@load`` directive in the Bro language to declare dependence on other scripts.
This directive is similar to the ``#include`` of C/C++, except the semantics This directive is similar to the ``#include`` of C/C++, except the semantics
are "load this script if it hasn't already been loaded". are, "load this script if it hasn't already been loaded."
.. note:: If one wants Bro to be able to load scripts that live outside the .. note:: If one wants Bro to be able to load scripts that live outside the
default directories in Bro's installation root, the ``BROPATH`` environment default directories in Bro's installation root, the ``BROPATH`` environment
@ -420,3 +421,19 @@ are "load this script if it hasn't already been loaded".
to be searched for scripts. See the default search path by doing to be searched for scripts. See the default search path by doing
``bro --help``. ``bro --help``.
Running Bro Without Installing
------------------------------
Developers who wish to run Bro directly from the ``build/``
directory (i.e., without performing ``make install``) will have
to first adjust ``BROPATH`` to look for scripts and
additional files inside the build directory. Sourcing either
``build/bro-path-dev.sh`` or ``build/bro-path-dev.csh`` as appropriate
for the current shell accomplishes this and also augments your
``PATH`` so you can use the Bro binary directly::
./configure
make
source build/bro-path-dev.sh
bro <options>


@ -1,194 +0,0 @@
Reporting Problems
==================
.. rst-class:: opening
Here we summarize some steps to follow when you see Bro doing
something it shouldn't. To provide help, it is often crucial for
us to have a way of reliably reproducing the effect you're seeing.
Unfortunately, reproducing problems can be rather tricky with Bro
because more often than not, they occur only in either very rare
situations or only after Bro has been running for some time. In
particular, getting a small trace showing a specific effect can be
a real problem. In the following, we'll summarize some strategies
to this end.
Reporting Problems
------------------
Generally, when you encounter a problem with Bro, the best thing to do
is to open a new ticket in `Bro's issue tracker
<http://tracker.bro.org/>`__ and include information on how to
reproduce the issue. Ideally, your ticket should come with the
following:
* The Bro version you're using (if working directly from the git
repository, the branch and revision number).
* The output you're seeing along with a description of what you'd expect
Bro to do instead.
* A *small* trace in `libpcap format <http://www.tcpdump.org>`__
demonstrating the effect (assuming the problem doesn't happen right
at startup already).
* The exact command-line you're using to run Bro with that trace. If
you can, please try to run the Bro binary directly from the command
line rather than using BroControl.
* Any non-standard scripts you're using (but please only those really
necessary; just a small code snippet triggering the problem would
be perfect).
* If you encounter a crash, information from the core dump, such as
the stack backtrace, can be very helpful. See below for more on
this.
How Do I Get a Trace File?
--------------------------
As Bro is usually running live, coming up with a small trace file that
reproduces a problem can turn out to be quite a challenge. Often it
works best to start with a large trace that triggers the problem,
and then successively thin it out as much as possible.
To get to the initial large trace, here are a few things you can try:
* Capture a trace with `tcpdump <http://www.tcpdump.org/>`__, either
on the same interface Bro is running on, or on another host where
you can generate traffic of the kind likely triggering the problem
(e.g., if you're seeing problems with the HTTP analyzer, record some
of your Web browsing on your desktop.) When using tcpdump, don't
forget to record *complete* packets (``tcpdump -s 0 ...``). You can
reduce the amount of traffic captured by using a suitable BPF filter
(e.g., for HTTP only, try ``port 80``).
* Bro's command-line option ``-w <trace>`` records all packets it
processes into the given file. You can then later run Bro
offline on this trace and it will process the packets in the same
way as it did live. This is particularly helpful with problems that
only occur after Bro has already been running for some time. For
example, sometimes a crash may be triggered by a particular kind of
traffic only occurring rarely. Running Bro live with ``-w`` and
then, after the crash, offline on the recorded trace might, with a
little bit of luck, reproduce the problem reliably. However, be
careful with ``-w``: it can result in huge trace files, quickly
filling up your disk. (One way to mitigate the space issues is to
periodically delete the trace file by configuring
``rotate-logs.bro`` accordingly. BroControl does that for you if you
set its ``SaveTraces`` option.)
* Finally, you can try running Bro on a publicly available trace
file, such as `anonymized FTP traffic <http://www-nrg.ee.lbl.gov
/anonymized-traces.html>`__, `headers-only enterprise traffic
<http://www.icir.org/enterprise-tracing/Overview.html>`__, or
`Defcon traffic <http://cctf.shmoo.com/>`__. Some of these
particularly stress certain components of Bro (e.g., the Defcon
traces contain tons of scans).
Once you have a trace that demonstrates the effect, you will often
notice that it's pretty big, in particular if recorded from the link
you're monitoring. Therefore, the next step is to shrink its size as
much as possible. Here are a few things you can try to this end:
* Very often, a single connection is able to demonstrate the problem.
If you can identify which one it is (e.g., from one of Bro's
``*.log`` files) you can extract the connection's packets from the
trace using tcpdump by filtering for the corresponding 4-tuple of
addresses and ports:
.. console::
> tcpdump -r large.trace -w small.trace host <ip1> and port <port1> and host <ip2> and port <port2>
* If you can't reduce the problem to a connection, try to identify
either a host pair or a single host triggering it, and filter down
the trace accordingly.
* You can try to extract a smaller time slice from the trace using
`TCPslice <http://www.tcpdump.org/related.html>`__. For example, to
extract the first 100 seconds from the trace:
.. console::
> tcpslice +100 <in >out
Alternatively, tcpdump extracts the first ``n`` packets with its
option ``-c <n>``.
Getting More Information After a Crash
--------------------------------------
If Bro crashes, a *core dump* can be very helpful to nail down the
problem. Examining a core is not for the faint of heart but can reveal
extremely useful information.
First, you should configure Bro with the option ``--enable-debug`` and
recompile; this will disable all compiler optimizations and thus make
the core dump more useful (don't expect great performance with this
version though; compiling Bro without optimization has a noticeable
impact on its CPU usage.). Then enable core dumps if you haven't
already (e.g., ``ulimit -c unlimited`` if you're using bash).
Once Bro has crashed, start gdb with the Bro binary and the file
containing the core dump. (Alternatively, you can also run Bro
directly inside gdb instead of working from a core file.) The first
helpful information to include with your tracker ticket is a stack
backtrace, which you get with gdb's ``bt`` command:
.. console::
> gdb bro core
[...]
> bt
If the crash occurs inside Bro's script interpreter, the next thing to
do is to identify the line of script code processed just before the
abnormal termination. Look for methods in the stack backtrace which
belong to any of the script interpreter's classes. Roughly speaking,
these are all classes with names ending in ``Expr``, ``Stmt``, or
``Val``. Then climb up the stack with ``up`` until you reach the first
of these methods. The object to which ``this`` is pointing will have a
``Location`` object, which in turn contains the file name and line
number of the corresponding piece of script code. Continuing the
example from above, here's how to get that information:
.. console::
[in gdb]
> up
> ...
> up
> print this->location->filename
> print this->location->first_line
If the crash occurs while processing input packets but you cannot
directly tell which connection is responsible (and thus not extract
its packets from the trace as suggested above), try getting the
4-tuple of the connection currently being processed from the core dump
by again examining the stack backtrace, this time looking for methods
belonging to the ``Connection`` class. That class has members
``orig_addr``/``resp_addr`` and ``orig_port``/``resp_port`` storing
(pointers to) the IP addresses and ports respectively:
.. console::
[in gdb]
> up
> ...
> up
> printf "%08x:%04x %08x:%04x\n", *this->orig_addr, this->orig_port, *this->resp_addr, this->resp_port
Note that these values are stored in `network byte order
<http://en.wikipedia.org/wiki/Endianness#Endianness_in_networking>`__
so you will need to flip the bytes around if you are on a low-endian
machine (which is why the above example prints them in hex). For
example, if an IP address prints as ``0100007f``, that's 127.0.0.1.


@ -1,55 +1,106 @@
Builtin Types and Attributes Types and Attributes
============================ ====================
Types Types
----- -----
Every value in a Bro script has a type (see below for a list of all built-in
types). Although Bro variables have static types (meaning that their type
is fixed), their type is inferred from the value to which they are
initially assigned when the variable is declared without an explicit type
name.
Automatic conversions happen when a binary operator has operands of
different types; such conversions are limited to the numeric types.
The numeric types are ``int``, ``count``, and ``double``
(``bool`` is not a numeric type).
When an automatic conversion occurs, values are promoted to the "highest"
type in the expression. In general, this promotion follows a simple
hierarchy: ``double`` is highest, ``int`` comes next, and ``count`` is
lowest.
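As a minimal sketch of how these promotions play out (the variable
names here are just for illustration):

.. code:: bro

   local n: count = 2;
   local x = n + 3.5;    # "n" is promoted to double, so "x" is a double (5.5)
   local y = n + -1;     # "n" is promoted to int, so "y" is an int (1)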
The Bro scripting language supports the following built-in types. The Bro scripting language supports the following built-in types.
.. bro:type:: void .. bro:type:: void
An internal Bro type representing an absence of a type. Should An internal Bro type (i.e., "void" is not a reserved keyword in the Bro
most often be seen as a possible function return type. scripting language) representing the absence of a return type for a
function.
.. bro:type:: bool .. bro:type:: bool
Reflects a value with one of two meanings: true or false. The two Reflects a value with one of two meanings: true or false. The two
``bool`` constants are ``T`` and ``F``. ``bool`` constants are ``T`` and ``F``.
The ``bool`` type supports the following operators: equality/inequality
(``==``, ``!=``), logical and/or (``&&``, ``||``), logical
negation (``!``), and absolute value (where ``|T|`` is 1, and ``|F|`` is 0).
.. bro:type:: int .. bro:type:: int
A numeric type representing a signed integer. An ``int`` constant A numeric type representing a 64-bit signed integer. An ``int`` constant
is a string of digits preceded by a ``+`` or ``-`` sign, e.g. is a string of digits preceded by a ``+`` or ``-`` sign, e.g.
``-42`` or ``+5``. When using type inferencing use care so that the ``-42`` or ``+5`` (the "+" sign is optional but see note about type
inferencing below). An ``int`` constant can also be written in
hexadecimal notation (in which case "0x" must be between the sign and
the hex digits), e.g. ``-0xFF`` or ``+0xabc123``.
The ``int`` type supports the following operators: arithmetic
operators (``+``, ``-``, ``*``, ``/``, ``%``), comparison operators
(``==``, ``!=``, ``<``, ``<=``, ``>``, ``>=``), assignment operators
(``=``, ``+=``, ``-=``), pre-increment (``++``), pre-decrement
(``--``), and absolute value (e.g., ``|-3|`` is 3).
When using type inferencing use care so that the
intended type is inferred, e.g. ``local size_difference = 0`` will intended type is inferred, e.g. ``local size_difference = 0`` will
infer :bro:type:`count`, while ``local size_difference = +0`` infer :bro:type:`count`, while ``local size_difference = +0``
will infer :bro:type:`int`. will infer :bro:type:`int`.
.. bro:type:: count .. bro:type:: count
A numeric type representing an unsigned integer. A ``count`` A numeric type representing a 64-bit unsigned integer. A ``count``
constant is a string of digits, e.g. ``1234`` or ``0``. constant is a string of digits, e.g. ``1234`` or ``0``. A ``count``
can also be written in hexadecimal notation (in which case "0x" must
precede the hex digits), e.g. ``0xff`` or ``0xABC123``.
The ``count`` type supports the same operators as the :bro:type:`int`
type. A unary plus or minus applied to a ``count`` results in an ``int``.
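For example (a small, illustrative sketch):

.. code:: bro

   local c: count = 0xff;   # hexadecimal count constant, i.e. 255
   local i = -c;            # unary minus applied to a count yields an int (-255)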
.. bro:type:: counter .. bro:type:: counter
An alias to :bro:type:`count`. An alias to :bro:type:`count`.
.. TODO: is there anything special about this type?
.. bro:type:: double .. bro:type:: double
A numeric type representing a double-precision floating-point A numeric type representing a double-precision floating-point
number. Floating-point constants are written as a string of digits number. Floating-point constants are written as a string of digits
with an optional decimal point, optional scale-factor in scientific with an optional decimal point, optional scale-factor in scientific
notation, and optional ``+`` or ``-`` sign. Examples are ``-1234``, notation, and optional ``+`` or ``-`` sign. Examples are ``-1234``,
``-1234e0``, ``3.14159``, and ``.003e-23``. ``-1234e0``, ``3.14159``, and ``.003E-23``.
The ``double`` type supports the following operators: arithmetic
operators (``+``, ``-``, ``*``, ``/``), comparison operators
(``==``, ``!=``, ``<``, ``<=``, ``>``, ``>=``), assignment operators
(``=``, ``+=``, ``-=``), and absolute value (e.g., ``|-3.14|`` is 3.14).
When using type inferencing use care so that the
intended type is inferred, e.g. ``local size_difference = 5`` will
infer :bro:type:`count`, while ``local size_difference = 5.0``
will infer :bro:type:`double`.
.. bro:type:: time .. bro:type:: time
A temporal type representing an absolute time. There is currently A temporal type representing an absolute time. There is currently
no way to specify a ``time`` constant, but one can use the no way to specify a ``time`` constant, but one can use the
:bro:id:`current_time` or :bro:id:`network_time` built-in functions :bro:id:`double_to_time`, :bro:id:`current_time`, or :bro:id:`network_time`
to assign a value to a ``time``-typed variable. built-in functions to assign a value to a ``time``-typed variable.
Time values support the comparison operators (``==``, ``!=``, ``<``,
``<=``, ``>``, ``>=``). A ``time`` value can be subtracted from
another ``time`` value to produce an ``interval`` value. An ``interval``
value can be added to, or subtracted from, a ``time`` value to produce a
``time`` value. The absolute value of a ``time`` value is a ``double``
with the same numeric value.
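A short, illustrative sketch of these operations (the variable names
are hypothetical):

.. code:: bro

   local start: time = network_time();
   local elapsed: interval = network_time() - start;   # time - time yields an interval
   local later: time = start + 5 min;                  # time + interval yields a time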
.. bro:type:: interval .. bro:type:: interval
@ -61,16 +112,44 @@ The Bro scripting language supports the following built-in types.
constant and time unit is optional. Appending the letter "s" to the constant and time unit is optional. Appending the letter "s" to the
time unit in order to pluralize it is also optional (to no semantic time unit in order to pluralize it is also optional (to no semantic
effect). Examples of ``interval`` constants are ``3.5 min`` and effect). Examples of ``interval`` constants are ``3.5 min`` and
``3.5mins``. An ``interval`` can also be negated, for example ``- ``3.5mins``. An ``interval`` can also be negated, for example
12 hr`` represents "twelve hours in the past". Intervals also ``-12 hr`` represents "twelve hours in the past".
support addition, subtraction, multiplication, division, and
comparison operations. Intervals support addition and subtraction. Intervals also support
division (in which case the result is a ``double`` value), the
comparison operators (``==``, ``!=``, ``<``, ``<=``, ``>``, ``>=``),
and the assignment operators (``=``, ``+=``, ``-=``). Also, an
``interval`` can be multiplied or divided by an arithmetic type
(``count``, ``int``, or ``double``) to produce an ``interval`` value.
The absolute value of an ``interval`` is a ``double`` value equal to the
number of seconds in the ``interval`` (e.g., ``|-1 min|`` is 60).
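For example (illustrative only):

.. code:: bro

   local i: interval = 2 * 30 sec;   # multiplying by a count yields an interval (1 min)
   print |i|;                        # absolute value is the number of seconds: 60.0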
.. bro:type:: string .. bro:type:: string
A type used to hold character-string values which represent text. A type used to hold character-string values which represent text.
String constants are created by enclosing text in double quotes (") String constants are created by enclosing text in double quotes (")
and the backslash character (\\) introduces escape sequences. and the backslash character (\\) introduces escape sequences (all of
the C-style escape sequences are supported).
Strings support concatenation (``+``), and assignment (``=``, ``+=``).
Strings also support the comparison operators (``==``, ``!=``, ``<``,
``<=``, ``>``, ``>=``). The number of characters in a string can be
found by enclosing the string within pipe characters (e.g., ``|"abc"|``
is 3).
The subscript operator can extract an individual character or a substring
of a string (string indexing is zero-based, but an index of
-1 refers to the last character in the string, and -2 refers to the
second-to-last character, etc.). When extracting a substring, the
starting and ending index values are separated by a colon. For example::
local orig = "0123456789";
local third_char = orig[2];
local last_char = orig[-1];
local first_three_chars = orig[0:2];
Substring searching can be performed using the "in" or "!in"
operators (e.g., "bar" in "foobar" yields true).
Note that Bro represents strings internally as a count and vector of Note that Bro represents strings internally as a count and vector of
bytes rather than a NUL-terminated byte string (although string bytes rather than a NUL-terminated byte string (although string
@ -127,9 +206,7 @@ The Bro scripting language supports the following built-in types.
.. bro:type:: enum .. bro:type:: enum
A type allowing the specification of a set of related values that A type allowing the specification of a set of related values that
have no further structure. The only operations allowed on have no further structure. An example declaration:
enumerations are equality comparisons and they do not have
associated values or ordering. An example declaration:
.. code:: bro .. code:: bro
@ -137,9 +214,9 @@ The Bro scripting language supports the following built-in types.
The last comma after ``Blue`` is optional. The last comma after ``Blue`` is optional.
.. bro:type:: timer The only operations allowed on enumerations are equality comparisons
(``==``, ``!=``) and assignment (``=``).
.. TODO: is this a type that's exposed to users? Enumerations do not have associated values or ordering.
.. bro:type:: port .. bro:type:: port
@ -149,10 +226,15 @@ The Bro scripting language supports the following built-in types.
message code. A ``port`` constant is written as an unsigned integer message code. A ``port`` constant is written as an unsigned integer
followed by one of ``/tcp``, ``/udp``, ``/icmp``, or ``/unknown``. followed by one of ``/tcp``, ``/udp``, ``/icmp``, or ``/unknown``.
Ports can be compared for equality and also for ordering. When Ports support the comparison operators (``==``, ``!=``, ``<``, ``<=``,
comparing order across transport-level protocols, ``unknown`` < ``>``, ``>=``). When comparing order across transport-level protocols,
``tcp`` < ``udp`` < ``icmp``, for example ``65535/tcp`` is smaller ``unknown`` < ``tcp`` < ``udp`` < ``icmp``, for example ``65535/tcp``
than ``0/udp``. is smaller than ``0/udp``.
Note that you can obtain the transport-level protocol type of a ``port``
with the :bro:id:`get_port_transport_proto` built-in function, and
the numeric value of a ``port`` with the :bro:id:`port_to_count`
built-in function.
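A small sketch using these built-in functions (values shown in the
comments):

.. code:: bro

   local p = 8080/tcp;
   print get_port_transport_proto(p);   # prints: tcp
   print port_to_count(p);              # prints: 8080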
.. bro:type:: addr .. bro:type:: addr
@ -162,22 +244,29 @@ The Bro scripting language supports the following built-in types.
``A1.A2.A3.A4``, where Ai all lie between 0 and 255. ``A1.A2.A3.A4``, where Ai all lie between 0 and 255.
IPv6 address constants are written as colon-separated hexadecimal form IPv6 address constants are written as colon-separated hexadecimal form
as described by :rfc:`2373`, but additionally encased in square brackets. as described by :rfc:`2373` (including the mixed notation with embedded
The mixed notation with embedded IPv4 addresses as dotted-quads in the IPv4 addresses as dotted-quads in the lower 32 bits), but additionally
lower 32 bits is also allowed. encased in square brackets. Some examples: ``[2001:db8::1]``,
Some examples: ``[2001:db8::1]``, ``[::ffff:192.168.1.100]``, or ``[::ffff:192.168.1.100]``, or
``[aaaa:bbbb:cccc:dddd:eeee:ffff:1111:2222]``. ``[aaaa:bbbb:cccc:dddd:eeee:ffff:1111:2222]``.
Note that IPv4-mapped IPv6 addresses (i.e., addresses with the first 80
bits zero, the next 16 bits one, and the remaining 32 bits are the IPv4
address) are treated internally as IPv4 addresses (for example,
``[::ffff:192.168.1.100]`` is equal to ``192.168.1.100``).
Hostname constants can also be used, but since a hostname can Hostname constants can also be used, but since a hostname can
correspond to multiple IP addresses, the type of such variable is a correspond to multiple IP addresses, the type of such a variable is a
:bro:type:`set` of :bro:type:`addr` elements. For example: :bro:type:`set` of :bro:type:`addr` elements. For example:
.. code:: bro .. code:: bro
local a = www.google.com; local a = www.google.com;
Addresses can be compared for (in)equality using ``==`` and ``!=``. Addresses can be compared for equality (``==``, ``!=``),
They can also be masked with ``/`` to produce a :bro:type:`subnet`: and also for ordering (``<``, ``<=``, ``>``, ``>=``). The absolute value
of an address gives the size in bits (32 for IPv4, and 128 for IPv6).
Addresses can also be masked with ``/`` to produce a :bro:type:`subnet`:
.. code:: bro .. code:: bro
@ -186,7 +275,8 @@ The Bro scripting language supports the following built-in types.
if ( a/16 == s ) if ( a/16 == s )
print "true"; print "true";
And checked for inclusion within a :bro:type:`subnet` using ``in`` : And checked for inclusion within a :bro:type:`subnet` using ``in``
or ``!in``:
.. code:: bro .. code:: bro
@ -195,6 +285,9 @@ The Bro scripting language supports the following built-in types.
if ( a in s ) if ( a in s )
print "true"; print "true";
Note that you can check if a given ``addr`` is IPv4 or IPv6 using
the :bro:id:`is_v4_addr` and :bro:id:`is_v6_addr` built-in functions.
.. bro:type:: subnet .. bro:type:: subnet
A type representing a block of IP addresses in CIDR notation. A A type representing a block of IP addresses in CIDR notation. A
@ -202,6 +295,10 @@ The Bro scripting language supports the following built-in types.
slash (/) and then the network prefix size specified as a decimal slash (/) and then the network prefix size specified as a decimal
number. For example, ``192.168.0.0/16`` or ``[fe80::]/64``. number. For example, ``192.168.0.0/16`` or ``[fe80::]/64``.
Subnets can be compared for equality (``==``, ``!=``). An
:bro:type:`addr` can be checked for inclusion in a subnet using
the "in" or "!in" operators.
.. bro:type:: any .. bro:type:: any
Used to bypass strong typing. For example, a function can take an Used to bypass strong typing. For example, a function can take an
@ -271,14 +368,14 @@ The Bro scripting language supports the following built-in types.
global t3 = MyTable([[$b=5]] = "b5", [[$b=7]] = "b7"); global t3 = MyTable([[$b=5]] = "b5", [[$b=7]] = "b7");
Accessing table elements if provided by enclosing values within square Accessing table elements is provided by enclosing index values within
brackets (``[]``), for example: square brackets (``[]``), for example:
.. code:: bro .. code:: bro
t[13] = "thirteen"; print t[11];
And membership can be tested with ``in``: And membership can be tested with ``in`` or ``!in``:
.. code:: bro .. code:: bro
@ -297,17 +394,23 @@ The Bro scripting language supports the following built-in types.
for ( [a, p] in services ) for ( [a, p] in services )
... ...
Add or overwrite individual table elements by assignment:
.. code:: bro
t[13] = "thirteen";
Remove individual table elements with ``delete``: Remove individual table elements with ``delete``:
.. code:: bro .. code:: bro
delete t[13]; delete t[13];
Nothing happens if the element with value ``13`` isn't present in Nothing happens if the element with index value ``13`` isn't present in
the table. the table.
Table size can be obtained by placing the table identifier between The number of elements in a table can be obtained by placing the table
vertical pipe (|) characters: identifier between vertical pipe characters:
.. code:: bro .. code:: bro
@ -355,27 +458,44 @@ The Bro scripting language supports the following built-in types.
global s4 = MySet([$b=1], [$b=2]); global s4 = MySet([$b=1], [$b=2]);
Set membership is tested with ``in``: Set membership is tested with ``in`` or ``!in``:
.. code:: bro .. code:: bro
if ( 21/tcp in s ) if ( 21/tcp in s )
... ...
if ( 21/tcp !in s )
...
Iterate over a set with a ``for`` loop:
.. code:: bro
local s: set[port];
for ( p in s )
...
Elements are added with ``add``: Elements are added with ``add``:
.. code:: bro .. code:: bro
add s[22/tcp]; add s[22/tcp];
Nothing happens if the element with value ``22/tcp`` was already present in
the set.
And removed with ``delete``: And removed with ``delete``:
.. code:: bro .. code:: bro
delete s[21/tcp]; delete s[21/tcp];
Set size can be obtained by placing the set identifier between Nothing happens if the element with value ``21/tcp`` isn't present in
vertical pipe (|) characters: the set.
The number of elements in a set can be obtained by placing the set
identifier between vertical pipe characters:
.. code:: bro .. code:: bro
@ -384,7 +504,8 @@ The Bro scripting language supports the following built-in types.
.. bro:type:: vector .. bro:type:: vector
A vector is like a :bro:type:`table`, except it's always indexed by a A vector is like a :bro:type:`table`, except it's always indexed by a
:bro:type:`count`. A vector is declared like: :bro:type:`count` (and vector indexing is always zero-based). A vector
is declared like:
.. code:: bro .. code:: bro
@ -411,21 +532,51 @@ The Bro scripting language supports the following built-in types.
global v2 = MyVec([$b=1], [$b=2], [$b=3]); global v2 = MyVec([$b=1], [$b=2], [$b=3]);
Adding an element to a vector involves accessing/assigning it: Accessing vector elements is provided by enclosing index values within
square brackets (``[]``), for example:
.. code:: bro .. code:: bro
v[3] = "four" print v[2];
Note how the vector indexing is 0-based. Iterate over a vector with a ``for`` loop:
Vector size can be obtained by placing the vector identifier between .. code:: bro
vertical pipe (|) characters:
local v: vector of string;
for ( n in v )
...
An element can be added to a vector by assigning the value (a value
that already exists at that index will be overwritten):
.. code:: bro
v[3] = "four";
The number of elements in a vector can be obtained by placing the vector
identifier between vertical pipe characters:
.. code:: bro .. code:: bro
|v| |v|
Vectors of integral types (``int`` or ``count``) support the pre-increment
(``++``) and pre-decrement operators (``--``), which will increment or
decrement each element in the vector.
Vectors of arithmetic types (``int``, ``count``, or ``double``) can be
operands of the arithmetic operators (``+``, ``-``, ``*``, ``/``, ``%``),
but both operands must have the same number of elements (and the modulus
operator ``%`` cannot be used if either operand is a ``vector of double``).
The resulting vector contains the result of the operation applied to each
of the elements in the operand vectors.
Vectors of bool can be operands of the logical "and" (``&&``) and logical
"or" (``||``) operators (both operands must have same number of elements).
The resulting vector of bool is the logical "and" (or logical "or") of
each element of the operand vectors.
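A brief, illustrative sketch of these element-wise operations (results
shown in the comments):

.. code:: bro

   local a = vector(1, 2, 3);
   local b = vector(10, 20, 30);
   local sums = a + b;    # element-wise addition: 11, 22, 33
   ++a;                   # pre-increment each element: 2, 3, 4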
.. bro:type:: record .. bro:type:: record
A ``record`` is a collection of values. Each value has a field name A ``record`` is a collection of values. Each value has a field name
@ -504,9 +655,11 @@ The Bro scripting language supports the following built-in types.
.. bro:type:: file

Bro supports writing to files, but not reading from them. Files
can be opened using either the :bro:id:`open` or :bro:id:`open_for_append`
built-in functions, and closed using the :bro:id:`close` built-in
function. For example, declare, open, and write to a file
and finally close it like:

.. code:: bro

@ -515,7 +668,7 @@ The Bro scripting language supports the following built-in types.
    close(f);
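
A complete declare/open/write/close sequence might look like the following
sketch (the file name and message are illustrative):

.. code:: bro

    global f = open("myfile.txt");
    print f, "hello, world";
    close(f);
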
Writing to files like this for logging usually isn't recommended; for better
logging support see :doc:`/frameworks/logging`.

.. bro:type:: function
@ -544,8 +697,8 @@ The Bro scripting language supports the following built-in types.
Note that in the definition above, it's not necessary for us to have
done the first (forward) declaration of ``greeting`` as a function
type, but when it is, the return type and argument list (including the
name of each argument) must match exactly.
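
For instance, a matching forward declaration and definition pair might look
like this sketch:

.. code:: bro

    global greeting: function(name: string): string;

    function greeting(name: string): string
        {
        return "Hello, " + name;
        }
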
Function types don't need to have a name and can be assigned anonymously:
@ -628,7 +781,7 @@ The Bro scripting language supports the following built-in types.
.. bro:type:: hook

A hook is another flavor of function that shares characteristics of
both a :bro:type:`function` and an :bro:type:`event`. They are like
events in that many handler bodies can be defined for the same hook
identifier and the order of execution can be enforced with
:bro:attr:`&priority`. They are more like functions in the way they
@ -717,14 +870,14 @@ scripting language supports the following built-in attributes.
.. bro:attr:: &optional

Allows a record field to be missing. For example the type ``record {
a: addr; b: port &optional; }`` could be instantiated either as the
singleton ``[$a=127.0.0.1]`` or as the pair ``[$a=127.0.0.1, $b=80/tcp]``.
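
A short sketch declaring such a record and testing for the optional field
with the ``?$`` operator:

.. code:: bro

    type Endpoint: record {
        a: addr;
        b: port &optional;
    };

    local e: Endpoint = [$a=127.0.0.1];

    if ( e?$b )
        print e$b;
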
.. bro:attr:: &default

Uses a default value for a record field, a function/hook/event
parameter, or container elements. For example, ``table[int] of
string &default="foo"`` would create a table that returns the
:bro:type:`string` ``"foo"`` for any non-existing index.
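
A minimal sketch mirroring that example:

.. code:: bro

    global t: table[int] of string &default="foo";

    print t[42];   # prints "foo" since index 42 doesn't exist
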
.. bro:attr:: &redef
@ -744,18 +897,25 @@ scripting language supports the following built-in attributes.
.. bro:attr:: &add_func

Can be applied to an identifier with &redef to specify a function to
be called any time a "redef <id> += ..." declaration is parsed. The
function takes two arguments of the same type as the identifier, the first
being the old value of the variable and the second being the new
value given after the "+=" operator in the "redef" declaration. The
return value of the function will be the actual new value of the
variable after the "redef" declaration is parsed.
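
A sketch of how this might be used (the identifier, function name, and merge
policy are illustrative):

.. code:: bro

    function merge_ports(old: set[port], new: set[port]): set[port]
        {
        # Keep everything already present plus the newly added ports
        # (this mimics the default union behavior, just to show the hook).
        for ( p in old )
            add new[p];

        return new;
        }

    const watched_ports: set[port] = { 80/tcp } &redef &add_func=merge_ports;

    redef watched_ports += { 443/tcp };
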
.. bro:attr:: &delete_func

Same as &add_func, except for "redef" declarations that use the "-="
operator.
.. bro:attr:: &expire_func

Called right before a container element expires. The function's
first parameter is of the same type as the container and the second
parameter is of the same type as the container's index. The return
value is an :bro:type:`interval` indicating the amount of additional
time to wait before expiring the container element at the given
index (which will trigger another execution of this function).
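
A sketch pairing it with :bro:attr:`&create_expire` (the table, function
name, and timeout are illustrative):

.. code:: bro

    function note_expired(t: table[addr] of count, idx: addr): interval
        {
        print fmt("entry for %s is expiring", idx);
        return 0secs;   # return a positive interval to postpone expiration
        }

    global hits: table[addr] of count &create_expire=30secs &expire_func=note_expired;
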
@ -779,7 +939,7 @@ scripting language supports the following built-in attributes.
.. bro:attr:: &persistent

Makes a variable persistent, i.e., its value is written to disk (by
default at shutdown time).

.. bro:attr:: &synchronized
@ -811,8 +971,9 @@ scripting language supports the following built-in attributes.
.. bro:attr:: &priority

Specifies the execution priority (as a signed integer) of a hook or
event handler. Higher values are executed before lower ones. The
default value is 0.
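
For example:

.. code:: bro

    event bro_init() &priority=10
        {
        print "this handler runs before default-priority bro_init handlers";
        }
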
.. bro:attr:: &group
@ -825,5 +986,15 @@ scripting language supports the following built-in attributes.
.. bro:attr:: &error_handler

Internally set on the events that are associated with the reporter
framework: :bro:id:`reporter_info`, :bro:id:`reporter_warning`, and
:bro:id:`reporter_error`. It prevents any handlers of those events
from being able to generate reporter messages that go through any of
those events (i.e., it prevents an infinite event recursion). Instead,
such nested reporter messages are output to stderr.
.. bro:attr:: &type_column

Used by the input framework. It can be used on columns of type
:bro:type:`port` and specifies the name of an additional column in
the input file that contains the protocol of the port (tcp/udp/icmp).
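
A sketch of how this might look on a record used with the input framework
(the record and column names are illustrative):

.. code:: bro

    type Val: record {
        p: port &type_column="proto";
    };
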
@ -0,0 +1 @@
.. broxygen:file_analyzer:: *
@ -0,0 +1,14 @@
================
Script Reference
================

.. toctree::
   :maxdepth: 1

   notices
   proto-analyzers
   file-analyzers
   builtins
   packages
   scripts
   Broxygen Example Script </scripts/broxygen/example.bro>
@ -0,0 +1,8 @@
.. Not nice, but I don't see a way to link to the notice index
.. directly from the upper-level TOC tree.

Notices
=======

See the `Bro Notice Index <../bro-noticeindex.html>`_.
@ -1,7 +1,7 @@
.. _script-packages:

Bro Package Index
=================

Bro has the following script packages (e.g. collections of related scripts in
a common directory). If the package directory contains a ``__load__.bro``
@ -10,3 +10,5 @@ script, it supports being loaded in mass as a whole directory for convenience.
Packages/scripts in the ``base/`` directory are all loaded by default, while
ones in ``policy/`` provide functionality and customization options that are
more appropriate for users to decide whether they'd like to load them or not.

.. broxygen:package_index:: *
@ -0,0 +1 @@
.. broxygen:proto_analyzer:: *
@ -0,0 +1,5 @@
================
Bro Script Index
================

.. broxygen:script_index:: *
@ -0,0 +1,6 @@
@load base/protocols/conn

event connection_state_remove(c: connection)
    {
    print c;
    }
@ -0,0 +1,7 @@
@load base/protocols/conn
@load base/protocols/http

event connection_state_remove(c: connection)
    {
    print c;
    }
@ -0,0 +1,22 @@
type Service: record {
    name: string;
    ports: set[port];
    rfc: count;
};

function print_service(serv: Service)
    {
    print fmt("Service: %s(RFC%d)", serv$name, serv$rfc);

    for ( p in serv$ports )
        print fmt(" port: %s", p);
    }

event bro_init()
    {
    local dns: Service = [$name="dns", $ports=set(53/udp, 53/tcp), $rfc=1035];
    local http: Service = [$name="http", $ports=set(80/tcp, 8080/tcp), $rfc=2616];

    print_service(dns);
    print_service(http);
    }
@ -0,0 +1,41 @@
type Service: record {
    name: string;
    ports: set[port];
    rfc: count;
};

type System: record {
    name: string;
    services: set[Service];
};

function print_service(serv: Service)
    {
    print fmt(" Service: %s(RFC%d)", serv$name, serv$rfc);

    for ( p in serv$ports )
        print fmt(" port: %s", p);
    }

function print_system(sys: System)
    {
    print fmt("System: %s", sys$name);

    for ( s in sys$services )
        print_service(s);
    }

event bro_init()
    {
    local server01: System;
    server01$name = "morlock";

    add server01$services[[$name="dns", $ports=set(53/udp, 53/tcp), $rfc=1035]];
    add server01$services[[$name="http", $ports=set(80/tcp, 8080/tcp), $rfc=2616]];

    print_system(server01);

    # local dns: Service = [$name="dns", $ports=set(53/udp, 53/tcp), $rfc=1035];
    # local http: Service = [$name="http", $ports=set(80/tcp, 8080/tcp), $rfc=2616];
    # print_service(dns);
    # print_service(http);
    }