Merge remote-tracking branch 'origin/master' into topic/seth/pppoe

Seth Hall 2012-10-24 00:50:43 -04:00
commit 012acb22e9
1326 changed files with 60201 additions and 14196 deletions

1655
CHANGES

File diff suppressed because it is too large.


@ -1,5 +1,5 @@
project(Bro C CXX)
cmake_minimum_required(VERSION 2.6 FATAL_ERROR)
cmake_minimum_required(VERSION 2.6.3 FATAL_ERROR)
include(cmake/CommonCMakeConfig.cmake)
########################################################################
@ -53,6 +53,8 @@ FindRequiredPackage(BISON)
FindRequiredPackage(PCAP)
FindRequiredPackage(OpenSSL)
FindRequiredPackage(BIND)
FindRequiredPackage(LibMagic)
FindRequiredPackage(ZLIB)
if (NOT BinPAC_ROOT_DIR AND
EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/aux/binpac/CMakeLists.txt)
@ -72,26 +74,12 @@ include_directories(BEFORE
${OpenSSL_INCLUDE_DIR}
${BIND_INCLUDE_DIR}
${BinPAC_INCLUDE_DIR}
${LibMagic_INCLUDE_DIR}
${ZLIB_INCLUDE_DIR}
)
# Optional Dependencies
set(HAVE_LIBMAGIC false)
find_package(LibMagic)
if (LIBMAGIC_FOUND)
set(HAVE_LIBMAGIC true)
include_directories(BEFORE ${LibMagic_INCLUDE_DIR})
list(APPEND OPTLIBS ${LibMagic_LIBRARY})
endif ()
set(HAVE_LIBZ false)
find_package(ZLIB)
if (ZLIB_FOUND)
set(HAVE_LIBZ true)
include_directories(BEFORE ${ZLIB_INCLUDE_DIR})
list(APPEND OPTLIBS ${ZLIB_LIBRARY})
endif ()
set(USE_GEOIP false)
find_package(LibGeoIP)
if (LIBGEOIP_FOUND)
@ -100,16 +88,75 @@ if (LIBGEOIP_FOUND)
list(APPEND OPTLIBS ${LibGeoIP_LIBRARY})
endif ()
set(USE_PERFTOOLS false)
if (ENABLE_PERFTOOLS)
find_package(GooglePerftools)
if (GOOGLEPERFTOOLS_FOUND)
set(USE_PERFTOOLS true)
include_directories(BEFORE ${GooglePerftools_INCLUDE_DIR})
list(APPEND OPTLIBS ${GooglePerftools_LIBRARIES})
set(HAVE_PERFTOOLS false)
set(USE_PERFTOOLS_DEBUG false)
set(USE_PERFTOOLS_TCMALLOC false)
if (NOT DISABLE_PERFTOOLS)
find_package(GooglePerftools)
endif ()
if (GOOGLEPERFTOOLS_FOUND)
set(HAVE_PERFTOOLS true)
# Non-Linux systems may not be well-supported by gperftools, so
# require explicit request from user to enable it in that case.
if (${CMAKE_SYSTEM_NAME} MATCHES "Linux" OR ENABLE_PERFTOOLS)
set(USE_PERFTOOLS_TCMALLOC true)
if (ENABLE_PERFTOOLS_DEBUG)
# Enable heap debugging with perftools.
set(USE_PERFTOOLS_DEBUG true)
include_directories(BEFORE ${GooglePerftools_INCLUDE_DIR})
list(APPEND OPTLIBS ${GooglePerftools_LIBRARIES_DEBUG})
else ()
# Link in tcmalloc for better performance.
list(APPEND OPTLIBS ${GooglePerftools_LIBRARIES})
endif ()
endif ()
endif ()
set(USE_DATASERIES false)
find_package(Lintel)
find_package(DataSeries)
find_package(LibXML2)
if (NOT DISABLE_DATASERIES AND
LINTEL_FOUND AND DATASERIES_FOUND AND LIBXML2_FOUND)
set(USE_DATASERIES true)
include_directories(BEFORE ${Lintel_INCLUDE_DIR})
include_directories(BEFORE ${DataSeries_INCLUDE_DIR})
include_directories(BEFORE ${LibXML2_INCLUDE_DIR})
list(APPEND OPTLIBS ${Lintel_LIBRARIES})
list(APPEND OPTLIBS ${DataSeries_LIBRARIES})
list(APPEND OPTLIBS ${LibXML2_LIBRARIES})
endif()
set(USE_ELASTICSEARCH false)
set(USE_CURL false)
find_package(LibCURL)
if (NOT DISABLE_ELASTICSEARCH AND LIBCURL_FOUND)
set(USE_ELASTICSEARCH true)
set(USE_CURL true)
include_directories(BEFORE ${LibCURL_INCLUDE_DIR})
list(APPEND OPTLIBS ${LibCURL_LIBRARIES})
endif()
if (ENABLE_PERFTOOLS_DEBUG)
# Just a no op to prevent CMake from complaining about manually-specified
# ENABLE_PERFTOOLS_DEBUG not being used if google perftools weren't found
endif ()
set(brodeps
${BinPAC_LIBRARY}
${PCAP_LIBRARY}
${OpenSSL_LIBRARIES}
${BIND_LIBRARY}
${LibMagic_LIBRARY}
${ZLIB_LIBRARY}
${OPTLIBS}
)
########################################################################
## System Introspection
@ -184,9 +231,13 @@ message(
"\nAux. Tools: ${INSTALL_AUX_TOOLS}"
"\n"
"\nGeoIP: ${USE_GEOIP}"
"\nlibz: ${HAVE_LIBZ}"
"\nlibmagic: ${HAVE_LIBMAGIC}"
"\nGoogle perftools: ${USE_PERFTOOLS}"
"\ngperftools found: ${HAVE_PERFTOOLS}"
"\n tcmalloc: ${USE_PERFTOOLS_TCMALLOC}"
"\n debugging: ${USE_PERFTOOLS_DEBUG}"
"\ncURL: ${USE_CURL}"
"\n"
"\nDataSeries: ${USE_DATASERIES}"
"\nElasticSearch: ${USE_ELASTICSEARCH}"
"\n"
"\n================================================================\n"
)


@ -1,4 +1,4 @@
Copyright (c) 1995-2011, The Regents of the University of California
Copyright (c) 1995-2012, The Regents of the University of California
through the Lawrence Berkeley National Laboratory and the
International Computer Science Institute. All rights reserved.

144
DocSourcesList.cmake Normal file

@ -0,0 +1,144 @@
# DO NOT EDIT
# This file is auto-generated from the genDocSourcesList.sh script.
#
# This is a list of Bro script sources for which to generate reST documentation.
# It will be included inline in the CMakeLists.txt found in the same directory
# in order to create Makefile targets that define how to generate reST from
# a given Bro script.
#
# Note: any path prefix of the script (2nd argument of rest_target macro)
# will be used to derive what path under scripts/ the generated documentation
# will be placed.
set(psd ${PROJECT_SOURCE_DIR}/scripts)
rest_target(${CMAKE_CURRENT_SOURCE_DIR} example.bro internal)
rest_target(${psd} base/init-default.bro internal)
rest_target(${psd} base/init-bare.bro internal)
rest_target(${CMAKE_BINARY_DIR}/src base/bro.bif.bro)
rest_target(${CMAKE_BINARY_DIR}/src base/const.bif.bro)
rest_target(${CMAKE_BINARY_DIR}/src base/event.bif.bro)
rest_target(${CMAKE_BINARY_DIR}/src base/logging.bif.bro)
rest_target(${CMAKE_BINARY_DIR}/src base/reporter.bif.bro)
rest_target(${CMAKE_BINARY_DIR}/src base/strings.bif.bro)
rest_target(${CMAKE_BINARY_DIR}/src base/types.bif.bro)
rest_target(${psd} base/frameworks/cluster/main.bro)
rest_target(${psd} base/frameworks/cluster/nodes/manager.bro)
rest_target(${psd} base/frameworks/cluster/nodes/proxy.bro)
rest_target(${psd} base/frameworks/cluster/nodes/worker.bro)
rest_target(${psd} base/frameworks/cluster/setup-connections.bro)
rest_target(${psd} base/frameworks/communication/main.bro)
rest_target(${psd} base/frameworks/control/main.bro)
rest_target(${psd} base/frameworks/dpd/main.bro)
rest_target(${psd} base/frameworks/intel/main.bro)
rest_target(${psd} base/frameworks/logging/main.bro)
rest_target(${psd} base/frameworks/logging/postprocessors/scp.bro)
rest_target(${psd} base/frameworks/logging/postprocessors/sftp.bro)
rest_target(${psd} base/frameworks/logging/writers/ascii.bro)
rest_target(${psd} base/frameworks/logging/writers/dataseries.bro)
rest_target(${psd} base/frameworks/metrics/cluster.bro)
rest_target(${psd} base/frameworks/metrics/main.bro)
rest_target(${psd} base/frameworks/metrics/non-cluster.bro)
rest_target(${psd} base/frameworks/notice/actions/add-geodata.bro)
rest_target(${psd} base/frameworks/notice/actions/drop.bro)
rest_target(${psd} base/frameworks/notice/actions/email_admin.bro)
rest_target(${psd} base/frameworks/notice/actions/page.bro)
rest_target(${psd} base/frameworks/notice/actions/pp-alarms.bro)
rest_target(${psd} base/frameworks/notice/cluster.bro)
rest_target(${psd} base/frameworks/notice/extend-email/hostnames.bro)
rest_target(${psd} base/frameworks/notice/main.bro)
rest_target(${psd} base/frameworks/notice/weird.bro)
rest_target(${psd} base/frameworks/packet-filter/main.bro)
rest_target(${psd} base/frameworks/packet-filter/netstats.bro)
rest_target(${psd} base/frameworks/reporter/main.bro)
rest_target(${psd} base/frameworks/signatures/main.bro)
rest_target(${psd} base/frameworks/software/main.bro)
rest_target(${psd} base/protocols/conn/contents.bro)
rest_target(${psd} base/protocols/conn/inactivity.bro)
rest_target(${psd} base/protocols/conn/main.bro)
rest_target(${psd} base/protocols/dns/consts.bro)
rest_target(${psd} base/protocols/dns/main.bro)
rest_target(${psd} base/protocols/ftp/file-extract.bro)
rest_target(${psd} base/protocols/ftp/main.bro)
rest_target(${psd} base/protocols/ftp/utils-commands.bro)
rest_target(${psd} base/protocols/http/file-extract.bro)
rest_target(${psd} base/protocols/http/file-hash.bro)
rest_target(${psd} base/protocols/http/file-ident.bro)
rest_target(${psd} base/protocols/http/main.bro)
rest_target(${psd} base/protocols/http/utils.bro)
rest_target(${psd} base/protocols/irc/dcc-send.bro)
rest_target(${psd} base/protocols/irc/main.bro)
rest_target(${psd} base/protocols/smtp/entities-excerpt.bro)
rest_target(${psd} base/protocols/smtp/entities.bro)
rest_target(${psd} base/protocols/smtp/main.bro)
rest_target(${psd} base/protocols/ssh/main.bro)
rest_target(${psd} base/protocols/ssl/consts.bro)
rest_target(${psd} base/protocols/ssl/main.bro)
rest_target(${psd} base/protocols/ssl/mozilla-ca-list.bro)
rest_target(${psd} base/protocols/syslog/consts.bro)
rest_target(${psd} base/protocols/syslog/main.bro)
rest_target(${psd} base/utils/addrs.bro)
rest_target(${psd} base/utils/conn-ids.bro)
rest_target(${psd} base/utils/directions-and-hosts.bro)
rest_target(${psd} base/utils/files.bro)
rest_target(${psd} base/utils/numbers.bro)
rest_target(${psd} base/utils/paths.bro)
rest_target(${psd} base/utils/patterns.bro)
rest_target(${psd} base/utils/site.bro)
rest_target(${psd} base/utils/strings.bro)
rest_target(${psd} base/utils/thresholds.bro)
rest_target(${psd} policy/frameworks/communication/listen.bro)
rest_target(${psd} policy/frameworks/control/controllee.bro)
rest_target(${psd} policy/frameworks/control/controller.bro)
rest_target(${psd} policy/frameworks/dpd/detect-protocols.bro)
rest_target(${psd} policy/frameworks/dpd/packet-segment-logging.bro)
rest_target(${psd} policy/frameworks/metrics/conn-example.bro)
rest_target(${psd} policy/frameworks/metrics/http-example.bro)
rest_target(${psd} policy/frameworks/metrics/ssl-example.bro)
rest_target(${psd} policy/frameworks/software/version-changes.bro)
rest_target(${psd} policy/frameworks/software/vulnerable.bro)
rest_target(${psd} policy/integration/barnyard2/main.bro)
rest_target(${psd} policy/integration/barnyard2/types.bro)
rest_target(${psd} policy/misc/analysis-groups.bro)
rest_target(${psd} policy/misc/capture-loss.bro)
rest_target(${psd} policy/misc/loaded-scripts.bro)
rest_target(${psd} policy/misc/profiling.bro)
rest_target(${psd} policy/misc/stats.bro)
rest_target(${psd} policy/misc/trim-trace-file.bro)
rest_target(${psd} policy/protocols/conn/known-hosts.bro)
rest_target(${psd} policy/protocols/conn/known-services.bro)
rest_target(${psd} policy/protocols/conn/weirds.bro)
rest_target(${psd} policy/protocols/dns/auth-addl.bro)
rest_target(${psd} policy/protocols/dns/detect-external-names.bro)
rest_target(${psd} policy/protocols/ftp/detect.bro)
rest_target(${psd} policy/protocols/ftp/software.bro)
rest_target(${psd} policy/protocols/http/detect-MHR.bro)
rest_target(${psd} policy/protocols/http/detect-intel.bro)
rest_target(${psd} policy/protocols/http/detect-sqli.bro)
rest_target(${psd} policy/protocols/http/detect-webapps.bro)
rest_target(${psd} policy/protocols/http/header-names.bro)
rest_target(${psd} policy/protocols/http/software-browser-plugins.bro)
rest_target(${psd} policy/protocols/http/software.bro)
rest_target(${psd} policy/protocols/http/var-extraction-cookies.bro)
rest_target(${psd} policy/protocols/http/var-extraction-uri.bro)
rest_target(${psd} policy/protocols/smtp/blocklists.bro)
rest_target(${psd} policy/protocols/smtp/detect-suspicious-orig.bro)
rest_target(${psd} policy/protocols/smtp/software.bro)
rest_target(${psd} policy/protocols/ssh/detect-bruteforcing.bro)
rest_target(${psd} policy/protocols/ssh/geo-data.bro)
rest_target(${psd} policy/protocols/ssh/interesting-hostnames.bro)
rest_target(${psd} policy/protocols/ssh/software.bro)
rest_target(${psd} policy/protocols/ssl/cert-hash.bro)
rest_target(${psd} policy/protocols/ssl/expiring-certs.bro)
rest_target(${psd} policy/protocols/ssl/extract-certs-pem.bro)
rest_target(${psd} policy/protocols/ssl/known-certs.bro)
rest_target(${psd} policy/protocols/ssl/validate-certs.bro)
rest_target(${psd} policy/tuning/defaults/packet-fragments.bro)
rest_target(${psd} policy/tuning/defaults/warnings.bro)
rest_target(${psd} policy/tuning/track-all-assets.bro)
rest_target(${psd} site/local-manager.bro)
rest_target(${psd} site/local-proxy.bro)
rest_target(${psd} site/local-worker.bro)
rest_target(${psd} site/local.bro)
rest_target(${psd} test-all-policy.bro)

81
INSTALL

@ -5,33 +5,44 @@ Installing Bro
Prerequisites
=============
Bro relies on the following libraries and tools, which need to be installed
Bro requires the following libraries and tools to be installed
before you begin:
* CMake 2.6 or greater http://www.cmake.org
* CMake 2.6.3 or greater http://www.cmake.org
* Libpcap (headers and libraries) http://www.tcpdump.org
* Perl (used only during the Bro build process)
* OpenSSL (headers and libraries) http://www.openssl.org
* Libpcap headers and libraries http://www.tcpdump.org
Bro can make uses of some optional libraries if they are found at
installation time:
* OpenSSL headers and libraries http://www.openssl.org
* Libmagic For identifying file types (e.g., in FTP transfers).
* BIND8 headers and libraries
* LibGeoIP For geo-locating IP addresses.
* Libmagic
* Libz For decompressing HTTP bodies by the HTTP analyzer, and for
compressed Bro-to-Bro communication.
* Libz
* SWIG http://www.swig.org
Bro also needs the following tools, but on most systems they will
already come preinstalled:
* BIND8 (headers and libraries)
* Bison (GNU Parser Generator)
* Flex (Fast Lexical Analyzer)
* Perl (Used only during the Bro build process)
* Bash (for BroControl)
Bro can make use of some optional libraries and tools if they are found at
build time:
* LibGeoIP (for geo-locating IP addresses)
* gperftools (tcmalloc is used to improve memory and CPU usage)
* sendmail (for BroControl)
* ipsumdump (for trace-summary) http://www.cs.ucla.edu/~kohler/ipsumdump
* Ruby executable, library, and headers (for Broccoli Ruby bindings)
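As a hedged sketch only (the package names below are an assumption for
Debian-style systems and are not taken from this document), the required
dependencies can usually be installed through the platform's package
manager::

    # hypothetical Debian/Ubuntu example; adjust package names for your platform
    sudo apt-get install cmake make gcc g++ flex bison swig \
        libpcap-dev libssl-dev libmagic-dev zlib1g-dev python-dev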
Installation
@ -39,18 +50,18 @@ Installation
To build and install into ``/usr/local/bro``::
> ./configure
> make
> make install
./configure
make
make install
This will first build Bro into a directory inside the distribution
This will first build Bro in a directory inside the distribution
called ``build/``, using default build options. It then installs all
required files into ``/usr/local/bro``, including the Bro binary in
``/usr/local/bro/bin/bro``.
You can specify a different installation directory with::
> ./configure --prefix=<dir>
./configure --prefix=<dir>
Note that ``/usr`` and ``/opt/bro`` are the standard prefixes for
binary Bro packages to be installed, so those are typically not good
@ -59,23 +70,23 @@ choices unless you are creating such a package.
Run ``./configure --help`` for more options.
Depending on the Bro package you downloaded, there may be auxiliary
tools and libraries available in the ``aux/`` directory. All of them
except for ``aux/bro-aux`` will also be built and installed by doing
``make install``. To install the programs that come in the
``aux/bro-aux`` directory, use ``make install-aux``. There are
tools and libraries available in the ``aux/`` directory. Some of them
will be automatically built and installed along with Bro. There are
``--disable-*`` options that can be given to the configure script to
turn off unwanted auxiliary projects.
turn off unwanted auxiliary projects that would otherwise be installed
automatically. Finally, use ``make install-aux`` to install some of
the other programs that are in the ``aux/bro-aux`` directory.
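For example, a sketch of a build that turns off one of the optional
auxiliary projects and then installs the ``aux/bro-aux`` programs
separately (only flags and targets documented above are used)::

    ./configure --prefix=/usr/local/bro --disable-broctl  # skip an unwanted auxiliary project
    make && make install                                  # build in build/ and install to the prefix
    make install-aux                                      # additionally install the aux/bro-aux programs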
OpenBSD users, please see our FAQ at
http://www.bro-ids.org/documentation/faq.html if you are having
problems installing Bro.
Running Bro
===========
Bro is a complex program and it takes a bit of time to get familiar
with it. A good place for newcomers to start is the quick start guide
available here:
http://www.bro-ids.org/documentation/quickstart.html
with it. A good place for newcomers to start is the Quick Start Guide
at http://www.bro-ids.org/documentation/quickstart.html.
Developers who wish to run Bro directly from the ``build/``
directory (i.e., without performing ``make install``) will have
@ -83,9 +94,9 @@ to first adjust ``BROPATH`` to look for scripts inside the build
directory. Sourcing either ``build/bro-path-dev.sh`` or
``build/bro-path-dev.csh`` as appropriate for the current shell
accomplishes this and also augments your ``PATH`` so you can use the
Bro binary directly:
Bro binary directly::
> ./configure
> make
> source build/bro-path-dev.sh
> bro <options>
./configure
make
source build/bro-path-dev.sh
bro <options>


@ -2,7 +2,7 @@
# A simple static wrapper for a number of standard Makefile targets,
# mostly just forwarding to build/Makefile. This is provided only for
# convenience and supports only a subset of what CMake's Makefile
# to offer. For more, execute that one directly.
# offers. For more, execute that one directly.
#
BUILD=build
@ -12,22 +12,34 @@ VERSION_MIN=$(REPO)-`cat VERSION`-minimal
HAVE_MODULES=git submodule | grep -v cmake >/dev/null
all: configured
( cd $(BUILD) && make )
$(MAKE) -C $(BUILD) $@
install: configured
( cd $(BUILD) && make install )
install: configured all
$(MAKE) -C $(BUILD) $@
install-aux: configured
( cd $(BUILD) && make install-aux )
$(MAKE) -C $(BUILD) $@
clean: configured docclean
( cd $(BUILD) && make clean )
$(MAKE) -C $(BUILD) $@
doc: configured
( cd $(BUILD) && make doc )
$(MAKE) -C $(BUILD) $@
docclean: configured
( cd $(BUILD) && make docclean )
$(MAKE) -C $(BUILD) $@
restdoc: configured
$(MAKE) -C $(BUILD) $@
restclean: configured
$(MAKE) -C $(BUILD) $@
broxygen: configured
$(MAKE) -C $(BUILD) $@
broxygenclean: configured
$(MAKE) -C $(BUILD) $@
dist:
@rm -rf $(VERSION_FULL) $(VERSION_FULL).tgz
@ -48,6 +60,9 @@ bindist:
distclean:
rm -rf $(BUILD)
test:
@(cd testing && make )
configured:
@test -d $(BUILD) || ( echo "Error: No build/ directory found. Did you run configure?" && exit 1 )
@test -e $(BUILD)/Makefile || ( echo "Error: No build/Makefile found. Did you run configure?" && exit 1 )
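As a usage sketch for the wrapper above (run from the top of the source
tree after ./configure has created build/; every target is simply
forwarded to build/Makefile):

    make             # same as: make -C build
    make doc         # forwarded: renders the HTML documentation
    make test        # runs the suite in testing/
    make distclean   # removes the build/ directory entirely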

257
NEWS Normal file

@ -0,0 +1,257 @@
Release Notes
=============
This document summarizes the most important changes in the current Bro
release. For a complete list of changes, see the ``CHANGES`` file
(note that submodules, such as BroControl and Broccoli, come with
their own CHANGES files).
Bro 2.2
-------
New Functionality
~~~~~~~~~~~~~~~~~
- GridFTP support. TODO: Extend.
- ssl.log now also records the subject client and issuer certificates.
Changed Functionality
~~~~~~~~~~~~~~~~~~~~~
- We removed the following, already deprecated, functionality:
* Scripting language:
- &disable_print_hook attribute.
* BiF functions:
- parse_dotted_addr(), dump_config(),
make_connection_persistent(), generate_idmef(),
split_complete()
- Removed a now-unused argument from the "do_split" helper function.
- "this" is no longer a reserved keyword.
- The Input Framework's update_finished event has been renamed to
end_of_data. It will now not only fire after table-reads have been
completed, but also after the last event of a whole-file-read (or
whole-db-read, etc.).
Bro 2.1
-------
New Functionality
~~~~~~~~~~~~~~~~~
- Bro now comes with extensive IPv6 support. Past versions offered
only basic IPv6 functionality that was rarely used in practice as it
had to be enabled explicitly. IPv6 support is now fully integrated
into all parts of Bro including protocol analysis and the scripting
language. It's on by default and no longer requires any special
configuration.
Some of the most significant enhancements include support for IPv6
fragment reassembly, support for following IPv6 extension header
chains, and support for tunnel decapsulation (6to4 and Teredo). The
DNS analyzer now handles AAAA records properly, and DNS lookups that
Bro itself performs now include AAAA queries, so that, for example,
the result returned by script-level lookups is a set that can
contain both IPv4 and IPv6 addresses. Support for the most common
ICMPv6 message types has been added. Also, the FTP EPSV and EPRT
commands are now handled properly. Internally, the way IP addresses
are stored has been improved, so Bro can handle both IPv4
and IPv6 by default without any special configuration.
In addition to Bro itself, the other Bro components have also been
made IPv6-aware by default. In particular, significant changes were
made to trace-summary, PySubnetTree, and Broccoli to support IPv6.
- Bro now decapsulates tunnels via its new tunnel framework located in
scripts/base/frameworks/tunnels. It currently supports Teredo,
AYIYA, IP-in-IP (both IPv4 and IPv6), and SOCKS. For all these, it
logs the outer tunnel connections in both conn.log and tunnel.log,
and then proceeds to analyze the inner payload as if it were not
tunneled, including also logging that session in conn.log. For
SOCKS, it generates a new socks.log in addition with more
information.
- Bro now features a flexible input framework that allows users to
integrate external information in real-time into Bro while it's
processing network traffic. The most direct use-case at the moment
is reading data from ASCII files into Bro tables, with updates
picked up automatically when the file changes during runtime. See
doc/input.rst for more information.
Internally, the input framework is structured around the notion of
"reader plugins" that make it easy to interface to different data
sources. We will add more in the future.
- BroControl now has built-in support for host-based load-balancing
when using either PF_RING, Myricom cards, or individual interfaces.
Instead of adding a separate worker entry in node.cfg for each Bro
worker process on each worker host, it is now possible to just
specify the number of worker processes on each host and BroControl
configures everything correctly (including any necessary environment
variables for the balancers).
This change adds three new keywords to the node.cfg file (to be used
with worker entries): lb_procs (specifies number of workers on a
host), lb_method (specifies what type of load balancing to use:
pf_ring, myricom, or interfaces), and lb_interfaces (used only with
"lb_method=interfaces" to specify which interfaces to load-balance
on).
- Bro's default ASCII log format is not exactly the most efficient way
to store and search large volumes of data. As alternatives,
Bro now comes with experimental support for two additional output
formats:
* DataSeries: an efficient binary format for recording structured
bulk data. DataSeries is developed and maintained at HP Labs.
See doc/logging-dataseries for more information.
* ElasticSearch: a distributed, RESTful storage and search engine
built on top of Apache Lucene. It scales very well, both
for distributed indexing and distributed searching. See
doc/logging-elasticsearch.rst for more information.
Note that at this point, we consider Bro's support for these two
formats as prototypes for collecting experience with alternative
outputs. We do not yet recommend them for production (but we welcome
feedback!).
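  As a sketch (the paths are placeholders, not taken from this
  document), the optional writers map onto new configure options:

    ./configure --with-dataseries=/usr/local --with-xml2=/usr  # point at DataSeries/Lintel and libxml2
    ./configure --with-curl=/usr                               # point at libcurl for ElasticSearch
    ./configure --disable-dataseries --disable-elasticsearch   # or switch the optional writers off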
Changed Functionality
~~~~~~~~~~~~~~~~~~~~~
The following summarizes the most important differences in existing
functionality. Note that this list is not complete; see CHANGES for
the full set.
- Changes in dependencies:
* Bro now requires CMake >= 2.6.3.
* On Linux, Bro now links in tcmalloc (part of Google perftools)
if found at configure time. Doing so can significantly improve
memory and CPU use.
On other platforms, the new configure option
--enable-perftools can be used to enable linking to tcmalloc;
see the configure sketch after this list. (Note that perftools's
support for non-Linux platforms may be less reliable.)
- The configure switch --enable-brov6 is gone.
- DNS name lookups performed by Bro now also query AAAA records. The
results of the A and AAAA queries for a given hostname are combined
such that at the scripting layer, the name resolution can yield a
set with both IPv4 and IPv6 addresses.
- The connection compressor was already deprecated in 2.0 and has now
been removed from the code base.
- We removed the "match" statement, which was no longer used by any of
the default scripts, nor was it likely to be used by anybody anytime
soon. With that, "match" and "using" are no longer reserved keywords.
- The syntax for IPv6 literals changed from "2607:f8b0:4009:802::1012"
to "[2607:f8b0:4009:802::1012]".
- Bro now spawns threads for doing its logging. From a user's
perspective not much should change, except that the OS may now show
a bunch of Bro threads.
- We renamed the configure option --enable-perftools to
--enable-perftools-debug to indicate that the switch is only relevant
for debugging the heap.
- Bro's ICMP analyzer now handles both IPv4 and IPv6 messages with a
joint set of events. The `icmp_conn` record got a new boolean field
'v6' that indicates whether the ICMP message is v4 or v6.
- Log postprocessor scripts get an additional argument indicating the
type of the log writer in use (e.g., "ascii").
- BroControl's make-archive-name script also receives the writer
type, but as its 2nd(!) argument. If you're using a custom version
of that script, you need to adapt it. See the shipped version for
details.
- Signature files can now be loaded via the new "@load-sigs"
directive. In contrast to the existing (and still supported)
signature_files constant, this can be used to load signatures
relative to the current script (e.g., "@load-sigs ./foo.sig").
- The options "tunnel_port" and "parse_udp_tunnels" have been removed.
Bro now supports decapsulating tunnels directly for protocols it
understands.
- ASCII logs now record the time when they were opened/closed at the
beginning and end of the file, respectively (wall clock). The
options LogAscii::header_prefix and LogAscii::include_header have
been renamed to LogAscii::meta_prefix and LogAscii::include_meta,
respectively.
- The ASCII writers "header_*" options have been renamed to "meta_*"
(because there's now also a footer).
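  As a configure-level sketch of the perftools changes above (the option
  names are the ones added in this release's configure script):

    ./configure                            # Linux: links tcmalloc automatically if gperftools is found
    ./configure --enable-perftools         # force tcmalloc on a non-Linux system
    ./configure --enable-perftools-debug   # additionally enable perftools heap debugging
    ./configure --disable-perftools        # don't try to build with gperftools at all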
Bro 2.0
-------
As the version number jump suggests, Bro 2.0 is a major upgrade and
lots of things have changed. We have assembled a separate upgrade
guide with the most important changes compared to Bro 1.5 at
http://www.bro-ids.org/documentation/upgrade.html. You can find
the offline version of that document in ``doc/upgrade.rst``.
Compared to the earlier 2.0 Beta version, the major changes in the
final release are:
* The default scripts now come with complete reference
documentation. See
http://www.bro-ids.org/documentation/index.html.
* libz and libmagic are now required dependencies.
* Reduced the snaplen default from 65535 back to the old default of 8192. The
large value was introducing performance problems on many
systems.
* Replaced the --snaplen/-l command line option with a
scripting-layer option called "snaplen". The new option can also
be redefined on the command line, e.g. ``bro -i eth0
snaplen=65535``.
* Reintroduced the BRO_LOG_SUFFIX environment variable that the
ASCII logger now respects to add a suffix to the log files it
creates.
* The ASCII logs now include further header information, and
fields set to an empty value are now logged as ``(empty)`` by
default (instead of ``-``, which is already used for fields that
are not set at all).
* Some NOTICES were renamed, and the signatures of some SSL events
have changed.
* bro-cut got some new capabilities (see the usage sketch after this list):
- If no field names are given on the command line, we now pass
through all fields.
- New options -u/-U for time output in UTC.
- New option -F to give output field separator.
* Broccoli supports more types internally, making it possible to send
complex records.
* Many smaller bug fixes, portability improvements, and general
polishing across all modules.
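  A hedged usage sketch for the new bro-cut options above (conn.log is
  just an example input; exact option syntax may differ slightly):

    bro-cut -u < conn.log      # no field names given: pass all fields through, times in UTC
    bro-cut -F ';' < conn.log  # use ';' as the output field separator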

10
README

@ -4,13 +4,15 @@ Bro Network Security Monitor
Bro is a powerful framework for network analysis and security
monitoring. Please see the INSTALL file for installation instructions
and pointers for getting started. For more documentation, research
publications, and community contact information, see Bro's home page:
and pointers for getting started. NEWS contains release notes for the
current version, and CHANGES has the complete history of changes.
Please see COPYING for licensing information.
For more documentation, research publications, and community contact
information, please see Bro's home page:
http://www.bro-ids.org
Please see COPYING for licensing information.
On behalf of the Bro Development Team,
Vern Paxson & Robin Sommer,


@ -1 +1 @@
2.0-beta-21
2.1-84

@ -1 +1 @@
Subproject commit 777b8a21c4c74e1f62e8b9896b082e8c059b539f
Subproject commit 74e6a5401c4228d5293c0e309283f43c389e7c12

@ -1 +1 @@
Subproject commit 906f970df5f708582c7002069b787d5af586b46f
Subproject commit 01bb93cb23f31a98fb400584e8d2f2fbe8a589ef

@ -1 +1 @@
Subproject commit e02e3cc89a3efb3d7ec376154e24835b4b828be8
Subproject commit 907210ce1470724fb386f939cc1b10a4caa2ae39

@ -1 +1 @@
Subproject commit 288c8568d7aaa38cf7c05833c133a91cbadbfce4
Subproject commit fd0e7e0b0cf50131efaf536a5683266cfe169455

@ -1 +1 @@
Subproject commit 7230a09a8c220d2117e491fdf293bf5c19819b65
Subproject commit 44a43e62452302277f88e8fac08d1f979dc53f98

2
cmake

@ -1 +1 @@
Subproject commit 704e255d7ef2faf926836c1c64d16c5b8a02b063
Subproject commit 14537f56d66b18ab9d5024f798caf4d1f356fc67


@ -1,6 +1,3 @@
/* enable IPV6 processing */
#cmakedefine BROv6
/* Old libpcap versions (< 0.6.1) need defining pcap_freecode and
pcap_compile_nopcap */
#cmakedefine DONT_HAVE_LIBPCAP_PCAP_FREECODE
@ -14,18 +11,9 @@
/* Define if you have the `getopt_long' function. */
#cmakedefine HAVE_GETOPT_LONG
/* Define if you have the `magic' library (-lmagic). */
#cmakedefine HAVE_LIBMAGIC
/* Define if you have the `z' library (-lz). */
#cmakedefine HAVE_LIBZ
/* We are on a Linux system */
#cmakedefine HAVE_LINUX
/* Define if you have the <magic.h> header file. */
#cmakedefine HAVE_MAGIC_H
/* Define if you have the `mallinfo' function. */
#cmakedefine HAVE_MALLINFO
@ -41,8 +29,8 @@
/* Define if you have the <net/ethernet.h> header file. */
#cmakedefine HAVE_NET_ETHERNET_H
/* We are on a OpenBSD system */
#cmakedefine HAVE_OPENBSD
/* Define if you have the <net/ethertypes.h> header file. */
#cmakedefine HAVE_NET_ETHERTYPES_H
/* have os-proto.h */
#cmakedefine HAVE_OS_PROTO_H
@ -121,7 +109,19 @@
#cmakedefine HAVE_GEOIP_CITY_EDITION_REV0_V6
/* Use Google's perftools */
#cmakedefine USE_PERFTOOLS
#cmakedefine USE_PERFTOOLS_DEBUG
/* Analyze Mobile IPv6 traffic */
#cmakedefine ENABLE_MOBILE_IPV6
/* Use libCurl. */
#cmakedefine USE_CURL
/* Use the DataSeries writer. */
#cmakedefine USE_DATASERIES
/* Use the ElasticSearch writer. */
#cmakedefine USE_ELASTICSEARCH
/* Version number of package */
#define VERSION "@VERSION@"
@ -154,3 +154,58 @@
/* Define u_int8_t */
#cmakedefine u_int8_t @u_int8_t@
/* OpenBSD's bpf.h may not declare this data link type, but it's supposed to be
used consistently for the same purpose on all platforms. */
#cmakedefine HAVE_DLT_PPP_SERIAL
#ifndef HAVE_DLT_PPP_SERIAL
#define DLT_PPP_SERIAL @DLT_PPP_SERIAL@
#endif
/* IPv6 Next Header values defined by RFC 3542 */
#cmakedefine HAVE_IPPROTO_HOPOPTS
#ifndef HAVE_IPPROTO_HOPOPTS
#define IPPROTO_HOPOPTS 0
#endif
#cmakedefine HAVE_IPPROTO_IPV6
#ifndef HAVE_IPPROTO_IPV6
#define IPPROTO_IPV6 41
#endif
#cmakedefine HAVE_IPPROTO_IPV4
#ifndef HAVE_IPPROTO_IPV4
#define IPPROTO_IPV4 4
#endif
#cmakedefine HAVE_IPPROTO_ROUTING
#ifndef HAVE_IPPROTO_ROUTING
#define IPPROTO_ROUTING 43
#endif
#cmakedefine HAVE_IPPROTO_FRAGMENT
#ifndef HAVE_IPPROTO_FRAGMENT
#define IPPROTO_FRAGMENT 44
#endif
#cmakedefine HAVE_IPPROTO_ESP
#ifndef HAVE_IPPROTO_ESP
#define IPPROTO_ESP 50
#endif
#cmakedefine HAVE_IPPROTO_AH
#ifndef HAVE_IPPROTO_AH
#define IPPROTO_AH 51
#endif
#cmakedefine HAVE_IPPROTO_ICMPV6
#ifndef HAVE_IPPROTO_ICMPV6
#define IPPROTO_ICMPV6 58
#endif
#cmakedefine HAVE_IPPROTO_NONE
#ifndef HAVE_IPPROTO_NONE
#define IPPROTO_NONE 59
#endif
#cmakedefine HAVE_IPPROTO_DSTOPTS
#ifndef HAVE_IPPROTO_DSTOPTS
#define IPPROTO_DSTOPTS 60
#endif
/* IPv6 options structure defined by RFC 3542 */
#cmakedefine HAVE_IP6_OPT
/* Common IPv6 extension structure */
#cmakedefine HAVE_IP6_EXT

75
configure vendored

@ -1,7 +1,7 @@
#!/bin/sh
# Convenience wrapper for easily viewing/setting options that
# the project's CMake scripts will recognize
set -e
command="$0 $*"
# check for `cmake` command
@ -24,16 +24,22 @@ Usage: $0 [OPTION]... [VAR=VALUE]...
--prefix=PREFIX installation directory [/usr/local/bro]
--scriptdir=PATH root installation directory for Bro scripts
[PREFIX/share/bro]
--conf-files-dir=PATH config files installation directory [PREFIX/etc]
Optional Features:
--enable-debug compile in debugging mode
--enable-brov6 enable IPv6 processing
--enable-perftools use Google's perftools
--enable-mobile-ipv6 analyze mobile IPv6 features defined by RFC 6275
--enable-perftools force use of Google perftools on non-Linux systems
(automatically on when perftools is present on Linux)
--enable-perftools-debug use Google's perftools for debugging
--disable-broccoli don't build or install the Broccoli library
--disable-broctl don't install Broctl
--disable-auxtools don't build or install auxilliary tools
--disable-auxtools don't build or install auxiliary tools
--disable-perftools don't try to build with Google Perftools
--disable-python don't try to build python bindings for broccoli
--disable-ruby don't try to build ruby bindings for broccoli
--disable-dataseries don't use the optional DataSeries log writer
--disable-elasticsearch don't use the optional ElasticSearch log writer
Required Packages in Non-Standard Locations:
--with-openssl=PATH path to OpenSSL install root
@ -55,6 +61,9 @@ Usage: $0 [OPTION]... [VAR=VALUE]...
--with-ruby-lib=PATH path to ruby library
--with-ruby-inc=PATH path to ruby headers
--with-swig=PATH path to SWIG executable
--with-dataseries=PATH path to DataSeries and Lintel libraries
--with-xml2=PATH path to libxml2 installation (for DataSeries)
--with-curl=PATH path to libcurl install root (for ElasticSearch)
Packaging Options (for developers):
--binary-package toggle special logic for binary packaging
@ -86,20 +95,24 @@ append_cache_entry () {
# set defaults
builddir=build
prefix=/usr/local/bro
CMakeCacheEntries=""
append_cache_entry CMAKE_INSTALL_PREFIX PATH /usr/local/bro
append_cache_entry BRO_ROOT_DIR PATH /usr/local/bro
append_cache_entry PY_MOD_INSTALL_DIR PATH /usr/local/bro/lib/broctl
append_cache_entry BRO_SCRIPT_INSTALL_PATH STRING /usr/local/bro/share/bro
append_cache_entry CMAKE_INSTALL_PREFIX PATH $prefix
append_cache_entry BRO_ROOT_DIR PATH $prefix
append_cache_entry PY_MOD_INSTALL_DIR PATH $prefix/lib/broctl
append_cache_entry BRO_SCRIPT_INSTALL_PATH STRING $prefix/share/bro
append_cache_entry BRO_ETC_INSTALL_DIR PATH $prefix/etc
append_cache_entry ENABLE_DEBUG BOOL false
append_cache_entry BROv6 BOOL false
append_cache_entry ENABLE_PERFTOOLS BOOL false
append_cache_entry ENABLE_PERFTOOLS_DEBUG BOOL false
append_cache_entry BinPAC_SKIP_INSTALL BOOL true
append_cache_entry BUILD_SHARED_LIBS BOOL true
append_cache_entry INSTALL_AUX_TOOLS BOOL true
append_cache_entry INSTALL_BROCCOLI BOOL true
append_cache_entry INSTALL_BROCTL BOOL true
append_cache_entry CPACK_SOURCE_IGNORE_FILES STRING
append_cache_entry ENABLE_MOBILE_IPV6 BOOL false
append_cache_entry DISABLE_PERFTOOLS BOOL false
# parse arguments
while [ $# -ne 0 ]; do
@ -120,26 +133,32 @@ while [ $# -ne 0 ]; do
CMakeGenerator="$optarg"
;;
--prefix=*)
prefix=$optarg
append_cache_entry CMAKE_INSTALL_PREFIX PATH $optarg
append_cache_entry BRO_ROOT_DIR PATH $optarg
append_cache_entry PY_MOD_INSTALL_DIR PATH $optarg/lib/broctl
if [ "$user_set_scriptdir" != "true" ]; then
append_cache_entry BRO_SCRIPT_INSTALL_PATH STRING $optarg/share/bro
fi
;;
--scriptdir=*)
append_cache_entry BRO_SCRIPT_INSTALL_PATH STRING $optarg
user_set_scriptdir="true"
;;
--conf-files-dir=*)
append_cache_entry BRO_ETC_INSTALL_DIR PATH $optarg
user_set_conffilesdir="true"
;;
--enable-debug)
append_cache_entry ENABLE_DEBUG BOOL true
;;
--enable-brov6)
append_cache_entry BROv6 BOOL true
--enable-mobile-ipv6)
append_cache_entry ENABLE_MOBILE_IPV6 BOOL true
;;
--enable-perftools)
append_cache_entry ENABLE_PERFTOOLS BOOL true
;;
--enable-perftools-debug)
append_cache_entry ENABLE_PERFTOOLS BOOL true
append_cache_entry ENABLE_PERFTOOLS_DEBUG BOOL true
;;
--disable-broccoli)
append_cache_entry INSTALL_BROCCOLI BOOL false
;;
@ -149,12 +168,21 @@ while [ $# -ne 0 ]; do
--disable-auxtools)
append_cache_entry INSTALL_AUX_TOOLS BOOL false
;;
--disable-perftools)
append_cache_entry DISABLE_PERFTOOLS BOOL true
;;
--disable-python)
append_cache_entry DISABLE_PYTHON_BINDINGS BOOL true
;;
--disable-ruby)
append_cache_entry DISABLE_RUBY_BINDINGS BOOL true
;;
--disable-dataseries)
append_cache_entry DISABLE_DATASERIES BOOL true
;;
--disable-elasticsearch)
append_cache_entry DISABLE_ELASTICSEARCH BOOL true
;;
--with-openssl=*)
append_cache_entry OpenSSL_ROOT_DIR PATH $optarg
;;
@ -183,7 +211,6 @@ while [ $# -ne 0 ]; do
append_cache_entry LibGeoIP_ROOT_DIR PATH $optarg
;;
--with-perftools=*)
append_cache_entry ENABLE_PERFTOOLS BOOL true
append_cache_entry GooglePerftools_ROOT_DIR PATH $optarg
;;
--with-python=*)
@ -209,6 +236,16 @@ while [ $# -ne 0 ]; do
--with-swig=*)
append_cache_entry SWIG_EXECUTABLE PATH $optarg
;;
--with-dataseries=*)
append_cache_entry DataSeries_ROOT_DIR PATH $optarg
append_cache_entry Lintel_ROOT_DIR PATH $optarg
;;
--with-xml2=*)
append_cache_entry LibXML2_ROOT_DIR PATH $optarg
;;
--with-curl=*)
append_cache_entry LibCURL_ROOT_DIR PATH $optarg
;;
--binary-package)
append_cache_entry BINARY_PACKAGING_MODE BOOL true
;;
@ -232,6 +269,14 @@ while [ $# -ne 0 ]; do
shift
done
if [ "$user_set_scriptdir" != "true" ]; then
append_cache_entry BRO_SCRIPT_INSTALL_PATH STRING $prefix/share/bro
fi
if [ "$user_set_conffilesdir" != "true" ]; then
append_cache_entry BRO_ETC_INSTALL_DIR PATH $prefix/etc
fi
if [ -d $builddir ]; then
# If build directory exists, check if it has a CMake cache
if [ -f $builddir/CMakeCache.txt ]; then

1
doc/.gitignore vendored

@ -1 +1,2 @@
html
*.pyc

1
doc/CHANGES Symbolic link

@ -0,0 +1 @@
../CHANGES


@ -1,4 +1,75 @@
add_custom_target(doc)
add_custom_target(docclean)
set(BIF_SRC_DIR ${PROJECT_SOURCE_DIR}/src)
set(RST_OUTPUT_DIR ${CMAKE_CURRENT_BINARY_DIR}/rest_output)
set(DOC_OUTPUT_DIR ${CMAKE_CURRENT_BINARY_DIR}/out)
set(DOC_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR})
set(DOC_SOURCE_WORKDIR ${CMAKE_CURRENT_BINARY_DIR}/sphinx-sources)
set(MASTER_POLICY_INDEX ${CMAKE_CURRENT_BINARY_DIR}/scripts/policy_index)
set(MASTER_PACKAGE_INDEX ${CMAKE_CURRENT_BINARY_DIR}/scripts/pkg_index)
file(GLOB_RECURSE DOC_SOURCES FOLLOW_SYMLINKS "*")
# configure the Sphinx config file (expand variables CMake might know about)
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/conf.py.in
${CMAKE_CURRENT_BINARY_DIR}/conf.py
@ONLY)
add_subdirectory(scripts)
# The "broxygen" target generates reST documentation for any outdated bro
# scripts and then uses Sphinx to generate HTML documentation from the reST
add_custom_target(broxygen
# copy the template documentation to the build directory
# to give as input for sphinx
COMMAND "${CMAKE_COMMAND}" -E copy_directory
${DOC_SOURCE_DIR}
${DOC_SOURCE_WORKDIR}
# copy generated policy script documentation into the
# working copy of the template documentation
COMMAND "${CMAKE_COMMAND}" -E copy_directory
${RST_OUTPUT_DIR}
${DOC_SOURCE_WORKDIR}/scripts
# append to the master index of all policy scripts
COMMAND cat ${MASTER_POLICY_INDEX} >>
${DOC_SOURCE_WORKDIR}/scripts/index.rst
# append to the master index of all policy packages
COMMAND cat ${MASTER_PACKAGE_INDEX} >>
${DOC_SOURCE_WORKDIR}/scripts/packages.rst
# construct a reST file for each group
COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/bin/group_index_generator.py
${CMAKE_CURRENT_BINARY_DIR}/scripts/group_list
${CMAKE_CURRENT_BINARY_DIR}/scripts
${DOC_SOURCE_WORKDIR}/scripts
# tell sphinx to generate html
COMMAND sphinx-build
-b html
-c ${CMAKE_CURRENT_BINARY_DIR}
-d ${DOC_OUTPUT_DIR}/doctrees
${DOC_SOURCE_WORKDIR}
${DOC_OUTPUT_DIR}/html
# create symlink to the html output directory for convenience
COMMAND "${CMAKE_COMMAND}" -E create_symlink
${DOC_OUTPUT_DIR}/html
${CMAKE_BINARY_DIR}/html
# copy Broccoli API reference into output dir if it exists
COMMAND test -d ${CMAKE_BINARY_DIR}/aux/broccoli/doc/html && ( rm -rf ${CMAKE_BINARY_DIR}/html/broccoli-api && cp -r ${CMAKE_BINARY_DIR}/aux/broccoli/doc/html ${CMAKE_BINARY_DIR}/html/broccoli-api ) || true
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
COMMENT "[Sphinx] Generating HTML policy script docs"
# SOURCES just adds stuff to IDE projects as a convenience
SOURCES ${DOC_SOURCES})
# The "sphinxclean" target removes just the Sphinx input/output directories
# from the build directory.
add_custom_target(broxygenclean
COMMAND "${CMAKE_COMMAND}" -E remove_directory
${DOC_SOURCE_WORKDIR}
COMMAND "${CMAKE_COMMAND}" -E remove_directory
${DOC_OUTPUT_DIR}
VERBATIM)
add_dependencies(broxygen broxygenclean restdoc)
add_custom_target(doc)
add_custom_target(docclean)
add_dependencies(doc broxygen)
add_dependencies(docclean broxygenclean restclean)

1
doc/INSTALL.rst Symbolic link

@ -0,0 +1 @@
../INSTALL


@ -1,7 +0,0 @@
all:
test -d html || mkdir html
for i in *.rst; do echo "$$i ..."; ./bin/rst2html.py $$i >html/`echo $$i | sed 's/rst$$/html/g'`; done
clean:
rm -rf html


@ -2,32 +2,41 @@
Documentation
=============
This directory contains Bro documentation in reStructured text format
This directory contains Bro documentation in reStructuredText format
(see http://docutils.sourceforge.net/rst.html).
Please note that for now these files are primarily intended for use on
http://www.bro-ids.org. While the Bro build process will render local
versions into ``build/doc/`` (if docutils is found), the resulting
HTML is very minimalistic and some features are not supported. In
particular, some links will be broken.
It is the root of a Sphinx source tree and can be modified to add more
common/general documentation, style sheets, JavaScript, etc. The Sphinx
config file is produced from ``conf.py.in``, and can be edited to change
various Sphinx options.
There is also a custom Sphinx domain implemented in ``source/ext/bro.py``
which adds some reST directives and roles that aid in generating useful
index entries and cross-references. Other extensions can be added in
a similar fashion.
Either the ``make doc`` or ``make broxygen`` targets in the top-level
Makefile can be used to locally render the reST files into HTML.
Those targets depend on:
* Python interpreter >= 2.5
* `Sphinx <http://sphinx.pocoo.org/>`_ >= 1.0.1
After completion, HTML documentation is symlinked in ``build/html``.
There are also ``make docclean`` and ``make broxygenclean`` targets to
clean the resulting documentation.
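A minimal sketch of the local documentation build, using only the
targets described above::

    ./configure          # sets up build/
    make broxygen        # generate reST for the scripts and render everything with Sphinx
    ls build/html        # the rendered HTML is symlinked here
    make broxygenclean   # remove the generated Sphinx input/output again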
Notes for Writing Documentation
-------------------------------
* If you want to refer to a Bro script that's part of the
distribution, use `foo.bro
<{{autodoc_bro_scripts}}/path/to/foo.html>`_. For example,
``{{autodoc_bro_scripts}}/scripts/base/frameworks/notice/main.html``.
* If you want to refer to a document that's part of the
distribution, it currently needs to be copied or otherwise symlinked
somewhere into this Sphinx source tree. Then, it can be referenced
in a toc tree or with the :doc: role. Use the :download: role to
refer to static files that will not undergo sphinx rendering.
* If you want to refer to a page on the Bro web site, use the
``docroot`` macro (e.g.,
``href="{{docroot}}/download/index.html"``). Make sure to
include the ``index.html`` for the main pages, just as in the
example.
* If you want to refer to a page inside this directory, use a relative
path with HTML extension (e.g., ``href="quickstart.html"``).
* If you want to refer to a page on the Bro web site, use an HTTP URL.
Guidelines
----------

1
doc/_static/960.css vendored Normal file

File diff suppressed because one or more lines are too long

513
doc/_static/basic.css vendored Normal file

@ -0,0 +1,513 @@
/*
* basic.css
* ~~~~~~~~~
*
* Sphinx stylesheet -- basic theme.
*
* :copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS.
* :license: BSD, see LICENSE for details.
*
*/
/* -- main layout ----------------------------------------------------------- */
div.clearer {
clear: both;
}
/* -- relbar ---------------------------------------------------------------- */
div.related {
width: 100%;
font-size: 90%;
}
div.related h3 {
display: none;
}
div.related ul {
margin: 0;
padding: 0 0 0 10px;
list-style: none;
}
div.related li {
display: inline;
}
div.related li.right {
float: right;
margin-right: 5px;
}
/* -- sidebar --------------------------------------------------------------- */
div.sphinxsidebarwrapper {
padding: 10px 5px 0 10px;
}
div.sphinxsidebar {
float: left;
width: 230px;
margin-left: -100%;
font-size: 90%;
}
div.sphinxsidebar ul {
list-style: none;
}
div.sphinxsidebar ul ul,
div.sphinxsidebar ul.want-points {
margin-left: 20px;
list-style: square;
}
div.sphinxsidebar ul ul {
margin-top: 0;
margin-bottom: 0;
}
div.sphinxsidebar form {
margin-top: 10px;
}
div.sphinxsidebar input {
border: 1px solid #98dbcc;
font-family: sans-serif;
font-size: 1em;
}
div.sphinxsidebar input[type="text"] {
width: 170px;
}
div.sphinxsidebar input[type="submit"] {
width: 30px;
}
img {
border: 0;
}
/* -- search page ----------------------------------------------------------- */
ul.search {
margin: 10px 0 0 20px;
padding: 0;
}
ul.search li {
padding: 5px 0 5px 20px;
background-image: url(file.png);
background-repeat: no-repeat;
background-position: 0 7px;
}
ul.search li a {
font-weight: bold;
}
ul.search li div.context {
color: #888;
margin: 2px 0 0 30px;
text-align: left;
}
ul.keywordmatches li.goodmatch a {
font-weight: bold;
}
/* -- index page ------------------------------------------------------------ */
table.contentstable {
width: 90%;
}
table.contentstable p.biglink {
line-height: 150%;
}
a.biglink {
font-size: 1.3em;
}
span.linkdescr {
font-style: italic;
padding-top: 5px;
font-size: 90%;
}
/* -- general index --------------------------------------------------------- */
table.indextable {
width: 100%;
}
table.indextable td {
text-align: left;
vertical-align: top;
}
table.indextable dl, table.indextable dd {
margin-top: 0;
margin-bottom: 0;
}
table.indextable tr.pcap {
height: 10px;
}
table.indextable tr.cap {
margin-top: 10px;
background-color: #f2f2f2;
}
img.toggler {
margin-right: 3px;
margin-top: 3px;
cursor: pointer;
}
div.modindex-jumpbox {
border-top: 1px solid #ddd;
border-bottom: 1px solid #ddd;
margin: 1em 0 1em 0;
padding: 0.4em;
}
div.genindex-jumpbox {
border-top: 1px solid #ddd;
border-bottom: 1px solid #ddd;
margin: 1em 0 1em 0;
padding: 0.4em;
}
/* -- general body styles --------------------------------------------------- */
a.headerlink {
visibility: hidden;
}
div.body p.caption {
text-align: inherit;
}
div.body td {
text-align: left;
}
.field-list ul {
padding-left: 1em;
}
.first {
margin-top: 0 !important;
}
p.rubric {
margin-top: 30px;
font-weight: bold;
}
img.align-left, .figure.align-left, object.align-left {
clear: left;
float: left;
margin-right: 1em;
}
img.align-right, .figure.align-right, object.align-right {
clear: right;
float: right;
margin-left: 1em;
}
img.align-center, .figure.align-center, object.align-center {
display: block;
margin-left: auto;
margin-right: auto;
}
.align-left {
text-align: left;
}
.align-center {
text-align: center;
}
.align-right {
text-align: right;
}
/* -- sidebars -------------------------------------------------------------- */
div.sidebar {
margin: 0 0 0.5em 1em;
border: 1px solid #ddb;
padding: 7px 7px 0 7px;
background-color: #ffe;
width: 40%;
float: right;
}
p.sidebar-title {
font-weight: bold;
}
/* -- topics ---------------------------------------------------------------- */
div.topic {
border: 1px solid #ccc;
padding: 7px 7px 0 7px;
margin: 10px 0 10px 0;
}
p.topic-title {
font-size: 1.1em;
font-weight: bold;
margin-top: 10px;
}
/* -- admonitions ----------------------------------------------------------- */
div.admonition {
margin-top: 10px;
margin-bottom: 10px;
padding: 7px;
}
div.admonition dt {
font-weight: bold;
}
div.admonition dl {
margin-bottom: 0;
}
p.admonition-title {
margin: 0px 10px 5px 0px;
font-weight: bold;
}
div.body p.centered {
text-align: center;
margin-top: 25px;
}
/* -- tables ---------------------------------------------------------------- */
table.field-list td, table.field-list th {
border: 0 !important;
}
table.footnote td, table.footnote th {
border: 0 !important;
}
th {
text-align: left;
padding-right: 5px;
}
table.citation {
border-left: solid 1px gray;
margin-left: 1px;
}
table.citation td {
border-bottom: none;
}
/* -- other body styles ----------------------------------------------------- */
ol.arabic {
list-style: decimal;
}
ol.loweralpha {
list-style: lower-alpha;
}
ol.upperalpha {
list-style: upper-alpha;
}
ol.lowerroman {
list-style: lower-roman;
}
ol.upperroman {
list-style: upper-roman;
}
dd p {
margin-top: 0px;
}
dd ul, dd table {
margin-bottom: 10px;
}
dd {
margin-top: 3px;
margin-bottom: 10px;
margin-left: 30px;
}
dt:target, .highlighted {
background-color: #fbe54e;
}
dl.glossary dt {
font-weight: bold;
font-size: 1.1em;
}
.field-list ul {
margin: 0;
padding-left: 1em;
}
.field-list p {
margin: 0;
}
.refcount {
color: #060;
}
.optional {
font-size: 1.3em;
}
.versionmodified {
font-style: italic;
}
.system-message {
background-color: #fda;
padding: 5px;
border: 3px solid red;
}
.footnote:target {
background-color: #ffa;
}
.line-block {
display: block;
margin-top: 1em;
margin-bottom: 1em;
}
.line-block .line-block {
margin-top: 0;
margin-bottom: 0;
margin-left: 1.5em;
}
.guilabel, .menuselection {
font-family: sans-serif;
}
.accelerator {
text-decoration: underline;
}
.classifier {
font-style: oblique;
}
abbr, acronym {
border-bottom: dotted 1px;
cursor: help;
}
/* -- code displays --------------------------------------------------------- */
pre {
overflow: auto;
overflow-y: hidden; /* fixes display issues on Chrome browsers */
}
td.linenos pre {
padding: 5px 0px;
border: 0;
background-color: transparent;
color: #aaa;
}
table.highlighttable {
margin-left: 0.5em;
}
table.highlighttable td {
padding: 0 0.5em 0 0.5em;
}
tt.descname {
background-color: transparent;
font-weight: bold;
# font-size: 1.2em;
}
tt.descclassname {
background-color: transparent;
}
tt.xref, a tt {
background-color: transparent;
# font-weight: bold;
}
h1 tt, h2 tt, h3 tt, h4 tt, h5 tt, h6 tt {
background-color: transparent;
}
.viewcode-link {
float: right;
}
.viewcode-back {
float: right;
font-family: sans-serif;
}
div.viewcode-block:target {
margin: -1px -10px;
padding: 0 10px;
}
/* -- math display ---------------------------------------------------------- */
img.math {
vertical-align: middle;
}
div.body div.math p {
text-align: center;
}
span.eqno {
float: right;
}
/* -- printout stylesheet --------------------------------------------------- */
@media print {
div.document,
div.documentwrapper,
div.bodywrapper {
margin: 0 !important;
width: 100%;
}
div.sphinxsidebar,
div.related,
div.footer,
#top-link {
display: none;
}
}

160
doc/_static/broxygen-extra.css vendored Normal file

@ -0,0 +1,160 @@
a.toc-backref {
color: #333;
}
h1, h2, h3, h4, h5, h6,
h1 a, h2 a, h3 a, h4 a, h5 a, h6 a {
padding:0 0 0px 0;
}
ul {
padding-bottom: 0px;
}
h1 {
font-weight: bold;
font-size: 32px;
line-height:32px;
text-align: center;
padding-top: 3px;
margin-bottom: 30px;
font-family: Palatino,'Palatino Linotype',Georgia,serif;;
color: #000;
border-bottom: 0px;
}
th.field-name
{
white-space:nowrap;
}
h2 {
margin-top: 50px;
padding-bottom: 5px;
margin-bottom: 30px;
border-bottom: 1px solid;
border-color: #aaa;
font-style: normal;
}
div.section h3 {
font-style: normal;
}
h3 {
font-size: 20px;
margin-top: 40px;
margin-bottom: 0px;
font-weight: bold;
font-style: normal;
}
h3.widgettitle {
font-style: normal;
}
h4 {
font-size:18px;
font-style: normal;
margin-bottom: 0em;
margin-top: 40px;
font-style: italic;
}
h5 {
font-size:16px;
}
h6 {
font-size:15px;
}
.toc-backref {
color: #333;
}
.contents ul {
padding-bottom: 1em;
}
dl.namespace {
display: none;
}
dl dt {
font-weight: normal;
}
table.docutils tbody {
margin: 1em 1em 1em 1em;
}
table.docutils td {
padding: 5pt 5pt 5pt 5pt;
font-size: 14px;
border-left: 0;
border-right: 0;
}
dl pre {
font-size: 14px;
}
table.docutils th {
padding: 5pt 5pt 5pt 5pt;
font-size: 14px;
font-style: normal;
border-left: 0;
border-right: 0;
}
table.docutils tr:first-child td {
#border-top: 1px solid #aaa;
}
.download {
font-family:"Courier New", Courier, mono;
font-weight: normal;
}
dt:target, .highlighted {
background-color: #ccc;
}
p {
padding-bottom: 0px;
}
p.last {
margin-bottom: 0px;
}
dl {
padding: 1em 1em 1em 1em;
background: #fffff0;
border: 1px solid #aaa;
}
dl {
margin-bottom: 10px;
}
table.docutils {
background: #fffff0;
border-collapse: collapse;
border: 1px solid #ddd;
}
dl table.docutils {
border: 0;
}
table.docutils dl {
border: 1px dashed #666;
}

0
doc/_static/broxygen-extra.js vendored Normal file

437
doc/_static/broxygen.css vendored Normal file

@ -0,0 +1,437 @@
/* Automatically generated. Do not edit. */
#bro-main, #bro-standalone-main {
padding: 0 0 0 0;
position:relative;
z-index:1;
}
#bro-main {
margin-bottom: 2em;
}
#bro-standalone-main {
margin-bottom: 0em;
padding-left: 50px;
padding-right: 50px;
}
#bro-outer {
color: #333;
background: #ffffff;
}
#bro-title {
font-weight: bold;
font-size: 32px;
line-height:32px;
text-align: center;
padding-top: 3px;
margin-bottom: 30px;
font-family: Palatino,'Palatino Linotype',Georgia,serif;;
color: #000;
}
.opening:first-letter {
font-size: 24px;
font-weight: bold;
letter-spacing: 0.05em;
}
.opening {
font-size: 17px;
}
.version {
text-align: right;
font-size: 12px;
color: #aaa;
line-height: 0;
height: 0;
}
.git-info-version {
position: relative;
height: 2em;
top: -1em;
color: #ccc;
float: left;
font-size: 12px;
}
.git-info-date {
position: relative;
height: 2em;
top: -1em;
color: #ccc;
float: right;
font-size: 12px;
}
body {
font-family:Arial, Helvetica, sans-serif;
font-size:15px;
line-height:22px;
color: #333;
margin: 0px;
}
h1, h2, h3, h4, h5, h6,
h1 a, h2 a, h3 a, h4 a, h5 a, h6 a {
padding:0 0 20px 0;
font-weight:bold;
text-decoration:none;
}
div.section h3, div.section h4, div.section h5, div.section h6 {
font-style: italic;
}
h1, h2 {
font-size:27px;
letter-spacing:-1px;
}
h3 {
margin-top: 1em;
font-size:18px;
}
h4 {
font-size:16px;
}
h5 {
font-size:15px;
}
h6 {
font-size:12px;
}
p {
padding:0 0 20px 0;
}
hr {
background:none;
height:1px;
line-height:1px;
border:0;
margin:0 0 20px 0;
}
ul, ol {
margin:0 20px 20px 0;
padding-left:40px;
}
ul.simple, ol.simple {
margin:0 0px 0px 0;
}
blockquote {
margin:0 0 0 40px;
}
strong, dfn {
font-weight:bold;
}
em, dfn {
font-style:italic;
}
sup, sub {
line-height:0;
}
pre {
white-space:pre;
}
pre, code, tt {
font-family:"Courier New", Courier, mono;
}
dl {
margin: 0 0 20px 0;
}
dl dt {
font-weight: bold;
}
dd {
margin:0 0 20px 20px;
}
small {
font-size:75%;
}
a:link,
a:visited,
a:active
{
color: #2a85a7;
}
a:hover
{
color:#c24444;
}
h1, h2, h3, h4, h5, h6,
h1 a, h2 a, h3 a, h4 a, h5 a, h6 a
{
color: #333;
}
hr {
border-bottom:1px solid #ddd;
}
pre {
color: #333;
background: #FFFAE2;
padding: 7px 5px 3px 5px;
margin-bottom: 25px;
margin-top: 0px;
}
ul {
padding-bottom: 5px;
}
h1, h2 {
margin-top: 30px;
}
h1 {
margin-bottom: 50px;
margin-bottom: 20px;
padding-bottom: 5px;
border-bottom: 1px solid;
border-color: #aaa;
}
h2 {
font-size: 24px;
}
pre {
-moz-box-shadow:0 0 6px #ddd;
-webkit-box-shadow:0 0 6px #ddd;
box-shadow:0 0 6px #ddd;
}
a {
text-decoration:none;
}
p {
padding-bottom: 15px;
}
p, dd, li {
text-align: justify;
}
li {
margin-bottom: 5px;
}
#footer .widget_links ul a,
#footer .widget_links ol a
{
color: #ddd;
}
#footer .widget_links ul a:hover,
#footer .widget_links ol a:hover
{
color:#c24444;
}
#footer .widget li {
padding-bottom:10px;
}
#footer .widget_links li {
padding-bottom:1px;
}
#footer .widget li:last-child {
padding-bottom:0;
}
#footer .widgettitle {
color: #ddd;
}
.widget {
margin:0 0 40px 0;
}
.widget, .widgettitle {
font-size:12px;
line-height:18px;
}
.widgettitle {
font-weight:bold;
text-transform:uppercase;
padding:0 0 10px 0;
margin:0 0 20px 0;
line-height:100%;
}
.widget UL, .widget OL {
list-style-type:none;
margin:0;
padding:0;
}
.widget p {
padding:0;
}
.widget li {
padding-bottom:10px;
}
.widget a {
text-decoration:none;
}
#bro-main .widgettitle
{
color: #333;
}
.widget img.left {
padding:5px 10px 10px 0;
}
.widget img.right {
padding:5px 0 10px 10px;
}
.ads .widgettitle {
margin-right:16px;
}
.widget {
margin-left: 1em;
}
.widgettitle {
color: #333;
}
.widgettitle {
border-bottom:1px solid #ddd;
}
.sidebar-toc ul li {
padding-bottom: 0px;
text-align: left;
list-style-type: square;
list-style-position: inside;
padding-left: 1em;
text-indent: -1em;
}
.sidebar-toc ul li li {
margin-left: 1em;
margin-bottom: 0px;
list-style-type: square;
}
.sidebar-toc ul li li a {
font-size: 8pt;
}
.contents {
padding: 10px;
background: #FFFAE2;
margin: 20px;
}
.topic-title {
font-size: 20px;
font-weight: bold;
padding: 0px 0px 5px 0px;
text-align: center;
padding-top: .5em;
}
.contents li {
margin-bottom: 0px;
list-style-type: square;
}
.contents ul ul li {
margin-left: 0px;
padding-left: 0px;
padding-top: 0em;
font-size: 90%;
list-style-type: square;
font-weight: normal;
}
.contents ul ul ul li {
list-style-type: none;
}
.contents ul ul ul ul li {
display:none;
}
.contents ul li {
padding-top: 1em;
list-style-type: none;
font-weight: bold;
}
.contents ul {
margin-left: 0px;
padding-left: 2em;
margin: 0px 0px 0px 0px;
}
.note, .warning, .error {
margin-left: 2em;
margin-right: 2em;
margin-top: 1.5em;
margin-bottom: 1.5em;
padding: 0.5em 1em 0.5em 1em;
overflow: auto;
border-left: solid 3px #aaa;
font-size: 15px;
color: #333;
}
.admonition p {
margin-left: 1em;
}
.admonition-title {
font-size: 16px;
font-weight: bold;
color: #000;
padding-bottom: 0em;
margin-bottom: .5em;
margin-top: 0em;
}

BIN
doc/_static/logo-bro.png vendored Normal file

Binary file not shown.


58
doc/_static/pygments.css vendored Normal file
View file

@ -0,0 +1,58 @@
.hll { background-color: #ffffcc }
.c { color: #aaaaaa; font-style: italic } /* Comment */
.err { color: #F00000; background-color: #F0A0A0 } /* Error */
.k { color: #0000aa } /* Keyword */
.cm { color: #aaaaaa; font-style: italic } /* Comment.Multiline */
.cp { color: #4c8317 } /* Comment.Preproc */
.c1 { color: #aaaaaa; font-style: italic } /* Comment.Single */
.cs { color: #0000aa; font-style: italic } /* Comment.Special */
.gd { color: #aa0000 } /* Generic.Deleted */
.ge { font-style: italic } /* Generic.Emph */
.gr { color: #aa0000 } /* Generic.Error */
.gh { color: #000080; font-weight: bold } /* Generic.Heading */
.gi { color: #00aa00 } /* Generic.Inserted */
.go { color: #888888 } /* Generic.Output */
.gp { color: #555555 } /* Generic.Prompt */
.gs { font-weight: bold } /* Generic.Strong */
.gu { color: #800080; font-weight: bold } /* Generic.Subheading */
.gt { color: #aa0000 } /* Generic.Traceback */
.kc { color: #0000aa } /* Keyword.Constant */
.kd { color: #0000aa } /* Keyword.Declaration */
.kn { color: #0000aa } /* Keyword.Namespace */
.kp { color: #0000aa } /* Keyword.Pseudo */
.kr { color: #0000aa } /* Keyword.Reserved */
.kt { color: #00aaaa } /* Keyword.Type */
.m { color: #009999 } /* Literal.Number */
.s { color: #aa5500 } /* Literal.String */
.na { color: #1e90ff } /* Name.Attribute */
.nb { color: #00aaaa } /* Name.Builtin */
.nc { color: #00aa00; text-decoration: underline } /* Name.Class */
.no { color: #aa0000 } /* Name.Constant */
.nd { color: #888888 } /* Name.Decorator */
.ni { color: #800000; font-weight: bold } /* Name.Entity */
.nf { color: #00aa00 } /* Name.Function */
.nn { color: #00aaaa; text-decoration: underline } /* Name.Namespace */
.nt { color: #1e90ff; font-weight: bold } /* Name.Tag */
.nv { color: #aa0000 } /* Name.Variable */
.ow { color: #0000aa } /* Operator.Word */
.w { color: #bbbbbb } /* Text.Whitespace */
.mf { color: #009999 } /* Literal.Number.Float */
.mh { color: #009999 } /* Literal.Number.Hex */
.mi { color: #009999 } /* Literal.Number.Integer */
.mo { color: #009999 } /* Literal.Number.Oct */
.sb { color: #aa5500 } /* Literal.String.Backtick */
.sc { color: #aa5500 } /* Literal.String.Char */
.sd { color: #aa5500 } /* Literal.String.Doc */
.s2 { color: #aa5500 } /* Literal.String.Double */
.se { color: #aa5500 } /* Literal.String.Escape */
.sh { color: #aa5500 } /* Literal.String.Heredoc */
.si { color: #aa5500 } /* Literal.String.Interpol */
.sx { color: #aa5500 } /* Literal.String.Other */
.sr { color: #009999 } /* Literal.String.Regex */
.s1 { color: #aa5500 } /* Literal.String.Single */
.ss { color: #0000aa } /* Literal.String.Symbol */
.bp { color: #00aaaa } /* Name.Builtin.Pseudo */
.vc { color: #aa0000 } /* Name.Variable.Class */
.vg { color: #aa0000 } /* Name.Variable.Global */
.vi { color: #aa0000 } /* Name.Variable.Instance */
.il { color: #009999 } /* Literal.Number.Integer.Long */

113
doc/_templates/layout.html vendored Normal file
View file

@ -0,0 +1,113 @@
{% extends "!layout.html" %}
{% block extrahead %}
<link rel="stylesheet" type="text/css" href="{{ pathto('_static/broxygen.css', 1) }}"></script>
<link rel="stylesheet" type="text/css" href="{{ pathto('_static/960.css', 1) }}"></script>
<link rel="stylesheet" type="text/css" href="{{ pathto('_static/pygments.css', 1) }}"></script>
<link rel="stylesheet" type="text/css" href="{{ pathto('_static/broxygen-extra.css', 1) }}"></script>
<script type="text/javascript" src="{{ pathto('_static/broxygen-extra.js', 1) }}"></script>
{% endblock %}
{% block header %}
<iframe src="http://www.bro-ids.org/frames/header-no-logo.html" width="100%" height="100px" frameborder="0" marginheight="0" scrolling="no" marginwidth="0">
</iframe>
{% endblock %}
{% block relbar2 %}{% endblock %}
{% block relbar1 %}{% endblock %}
{% block content %}
<div id="bro-main" class="clearfix">
<div class="container_12">
<div class="grid_9">
<div>
{{ relbar() }}
</div>
<div class="body">
{% block body %}
{% endblock %}
</div>
</div>
<!-- Sidebar -->
<div class="grid_3 omega">
<div>
<img id="logo" src="{{pathto('_static/logo-bro.png', 1)}}" alt="Logo" />
</div>
<br />
<div class="widget sidebar-toc">
<h3 class="widgettitle">
Table of Contents
</h3>
<p>
<!-- <ul id="sidebar-toc"></ul> -->
<ul>{{toc}}</ul>
</p>
</div>
{% if next %}
<div class="widget">
<h3 class="widgettitle">
Next Page
</h3>
<p>
<a href="{{ next.link|e }}">{{ next.title }}</a>
</p>
</div>
{% endif %}
{% if prev %}
<div class="widget">
<h3 class="widgettitle">
Previous Page
</h3>
<p>
<a href="{{ prev.link|e }}">{{ prev.title }}</a>
</p>
</div>
{% endif %}
{%- if pagename != "search" %}
<div id="searchbox" style="display: none" class="widget">
<h3 class="widgettitle">{{ _('Search') }}</h3>
<form class="search" action="{{ pathto('search') }}" method="get">
<input type="text" name="q" />
<input type="submit" value="{{ _('Search') }}" />
<input type="hidden" name="check_keywords" value="yes" />
<input type="hidden" name="area" value="default" />
</form>
</div>
<script type="text/javascript">$('#searchbox').show(0);</script>
{%- endif %}
</div>
</div>
<div class="container_12">
<div class="grid_12 alpha omega">
<div class="center">
<small>
Copyright {{ copyright }}.
Last updated on {{ last_updated }}.
Created using <a href="http://sphinx.pocoo.org/">Sphinx</a> {{ sphinx_version }}.
</small>
</div>
</div>
</div>
</div>
{% endblock %}
{% block footer %}
<iframe src="http://www.bro-ids.org/frames/footer.html" width="100%" height="420px" frameborder="0" marginheight="0" scrolling="no" marginwidth="0">
</iframe>
{% endblock %}

View file

@ -49,6 +49,7 @@ with open(group_list, 'r') as f_group_list:
if not os.path.exists(os.path.dirname(group_file)):
os.makedirs(os.path.dirname(group_file))
with open(group_file, 'w') as f_group_file:
f_group_file.write(":orphan:\n\n")
title = "Package Index: %s\n" % os.path.dirname(group)
f_group_file.write(title);
for n in range(len(title)):

View file

@ -1,62 +0,0 @@
#!/usr/bin/env python
#
# Derived from docutils standard rst2html.py.
#
# $Id: rst2html.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
#
#
# Extension: we add two dummy directives, "code" and "console", to be
# compatible with Bro's web site setup.
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
import textwrap
from docutils.core import publish_cmdline, default_description
from docutils import nodes
from docutils.parsers.rst import directives, Directive
from docutils.parsers.rst.directives.body import LineBlock
class Literal(Directive):
#max_line_length = 68
max_line_length = 0
required_arguments = 0
optional_arguments = 1
final_argument_whitespace = True
has_content = True
def wrapped_content(self):
content = []
if Literal.max_line_length:
for line in self.content:
content += textwrap.wrap(line, Literal.max_line_length, subsequent_indent=" ")
else:
content = self.content
return u'\n'.join(content)
def run(self):
self.assert_has_content()
content = self.wrapped_content()
literal = nodes.literal_block(content, content)
return [literal]
directives.register_directive('code', Literal)
directives.register_directive('console', Literal)
description = ('Generates (X)HTML documents from standalone reStructuredText '
'sources. ' + default_description)
publish_cmdline(writer_name='html', description=description)

View file

@ -11,9 +11,7 @@ Architecture
The figure below illustrates the main components of a Bro cluster.
.. {{git_pull('bro:doc/deployment.png')}}
.. image:: deployment.bro.png
.. image:: images/deployment.png
Tap
***
@ -47,7 +45,7 @@ This is the Bro process that sniffs network traffic and does protocol analysis o
The rule of thumb we have followed recently is to allocate approximately 1 core for every 80Mbps of traffic that is being analyzed, however this estimate could be extremely traffic mix specific. It has generally worked for mixed traffic with many users and servers. For example, if your traffic peaks around 2Gbps (combined) and you want to handle traffic at peak load, you may want to have 26 cores available (2048 / 80 == 25.6). If the 80Mbps estimate works for your traffic, this could be handled by 3 physical hosts dedicated to being workers with each one containing dual 6-core processors.
Once a flow based load balancer is put into place this model is extremely easy to scale as well so its recommended that you guess at the amount of hardware you will need to fully analyze your traffic. If it turns out that you need more, its relatively easy to easy increase the size of the cluster in most cases.
Once a flow based load balancer is put into place this model is extremely easy to scale as well, so it's recommended that you guess at the amount of hardware you will need to fully analyze your traffic. If it turns out that you need more, it's relatively easy to increase the size of the cluster in most cases.
Frontend Options
----------------
@ -60,7 +58,7 @@ Discrete hardware flow balancers
cPacket
^^^^^^^
If you are monitoring one or more 10G physical interfaces, the recommended solution is to use either a cFlow or cVu device from cPacket because they are currently being used very successfully at a number of sites. These devices will perform layer-2 load balancing by rewriting the destination ethernet MAC address to cause each packet associated with a particular flow to have the same destination MAC. The packets can then be passed directly to a monitoring host where each worker has a BPF filter to limit it's visibility to only that stream of flows or onward to a commodity switch to split the traffic out to multiple 1G interfaces for the workers. This can ultimately greatly reduce costs since workers can use relatively inexpensive 1G interfaces.
If you are monitoring one or more 10G physical interfaces, the recommended solution is to use either a cFlow or cVu device from cPacket because they are currently being used very successfully at a number of sites. These devices will perform layer-2 load balancing by rewriting the destination ethernet MAC address to cause each packet associated with a particular flow to have the same destination MAC. The packets can then be passed directly to a monitoring host where each worker has a BPF filter to limit its visibility to only that stream of flows or onward to a commodity switch to split the traffic out to multiple 1G interfaces for the workers. This can ultimately greatly reduce costs since workers can use relatively inexpensive 1G interfaces.
OpenFlow Switches
^^^^^^^^^^^^^^^^^
@ -78,7 +76,7 @@ The PF_RING software for Linux has a “clustering” feature which will do flow
Netmap
^^^^^^
FreeBSD has an in-progress project named Netmap which will enabled flow based load balancing as well. When it becomes viable for real world use, this document will be updated.
FreeBSD has an in-progress project named Netmap which will enable flow based load balancing as well. When it becomes viable for real world use, this document will be updated.
Click! Software Router
^^^^^^^^^^^^^^^^^^^^^^

View file

@ -0,0 +1 @@
../../../aux/binpac/README

View file

@ -0,0 +1 @@
../../../aux/bro-aux/README

View file

@ -0,0 +1 @@
../../../aux/broccoli/bindings/broccoli-python/README

View file

@ -0,0 +1 @@
../../../aux/broccoli/bindings/broccoli-ruby/README

View file

@ -0,0 +1 @@
../../../aux/broccoli/README

View file

@ -0,0 +1 @@
../../../aux/broccoli/doc/broccoli-manual.rst

View file

@ -0,0 +1 @@
../../../aux/broctl/doc/broctl.rst

View file

@ -0,0 +1 @@
../../../aux/btest/README

View file

@ -0,0 +1 @@
../../../aux/broctl/aux/capstats/README

View file

@ -0,0 +1 @@
../../../aux/broctl/aux/pysubnettree/README

View file

@ -0,0 +1 @@
../../../aux/broctl/aux/trace-summary/README

View file

@ -15,7 +15,7 @@ import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('source/ext'))
sys.path.insert(0, os.path.abspath('sphinx-sources/ext'))
# -- General configuration -----------------------------------------------------
@ -24,10 +24,10 @@ sys.path.insert(0, os.path.abspath('source/ext'))
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['bro']
extensions = ['bro', 'rst_directive', 'sphinx.ext.todo', 'adapt-toc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['source/_templates']
templates_path = ['sphinx-sources/_templates', 'sphinx-sources/_static']
# The suffix of source filenames.
source_suffix = '.rst'
@ -40,7 +40,7 @@ master_doc = 'index'
# General information about the project.
project = u'Bro'
copyright = u'2011, The Bro Project'
copyright = u'2012, The Bro Project'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
@ -59,7 +59,7 @@ release = '@VERSION@'
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
@ -90,18 +90,20 @@ pygments_style = 'sphinx'
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
html_theme = 'basic'
html_last_updated_fmt = '%B %d, %Y'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
html_theme_options = { }
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# "<project> v<release> Documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
@ -119,7 +121,7 @@ html_theme = 'default'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['source/_static']
html_static_path = ['sphinx-sources/_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
@ -130,7 +132,9 @@ html_static_path = ['source/_static']
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
html_sidebars = {
'**': ['localtoc.html', 'sourcelink.html', 'searchbox.html'],
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
@ -163,8 +167,9 @@ html_static_path = ['source/_static']
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Brodoc'
htmlhelp_basename = 'Broxygen'
html_add_permalinks = None
# -- Options for LaTeX output --------------------------------------------------
@ -204,7 +209,6 @@ latex_documents = [
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
@ -213,3 +217,6 @@ man_pages = [
('index', 'bro', u'Bro Documentation',
[u'The Bro Project'], 1)
]
# -- Options for todo plugin --------------------------------------------
todo_include_todos=True

29
doc/ext/adapt-toc.py Normal file
View file

@ -0,0 +1,29 @@
import sys
import re
# Removes the first TOC level, which is just the page title.
def process_html_toc(app, pagename, templatename, context, doctree):
if not "toc" in context:
return
toc = context["toc"]
lines = toc.strip().split("\n")
lines = lines[2:-2]
toc = "\n".join(lines)
toc = "<ul>" + toc
context["toc"] = toc
# print >>sys.stderr, pagename
# print >>sys.stderr, context["toc"]
# print >>sys.stderr, "-----"
# print >>sys.stderr, toc
# print >>sys.stderr, "===="
def setup(app):
app.connect('html-page-context', process_html_toc)

View file

@ -4,6 +4,9 @@
def setup(Sphinx):
Sphinx.add_domain(BroDomain)
Sphinx.add_node(see)
Sphinx.add_directive_to_domain('bro', 'see', SeeDirective)
Sphinx.connect('doctree-resolved', process_see_nodes)
from sphinx import addnodes
from sphinx.domains import Domain, ObjType, Index
@ -18,7 +21,57 @@ from docutils.parsers.rst import Directive
from docutils.parsers.rst import directives
from docutils.parsers.rst.roles import set_classes
class see(nodes.General, nodes.Element):
refs = []
class SeeDirective(Directive):
has_content = True
def run(self):
n = see('')
n.refs = string.split(string.join(self.content))
return [n]
def process_see_nodes(app, doctree, fromdocname):
for node in doctree.traverse(see):
content = []
para = nodes.paragraph()
para += nodes.Text("See also:", "See also:")
for name in node.refs:
join_str = " "
if name != node.refs[0]:
join_str = ", "
link_txt = join_str + name;
if name not in app.env.domaindata['bro']['idtypes']:
# Just create the text and issue warning
app.env.warn(fromdocname,
'unknown target for ".. bro:see:: %s"' % (name))
para += nodes.Text(link_txt, link_txt)
else:
# Create a reference
typ = app.env.domaindata['bro']['idtypes'][name]
todocname = app.env.domaindata['bro']['objects'][(typ, name)]
newnode = nodes.reference('', '')
innernode = nodes.literal(_(name), _(name))
newnode['refdocname'] = todocname
newnode['refuri'] = app.builder.get_relative_uri(
fromdocname, todocname)
newnode['refuri'] += '#' + typ + '-' + name
newnode.append(innernode)
para += nodes.Text(join_str, join_str)
para += newnode
content.append(para)
node.replace_self(content)
class BroGeneric(ObjectDescription):
def update_type_map(self, idname):
if 'idtypes' not in self.env.domaindata['bro']:
self.env.domaindata['bro']['idtypes'] = {}
self.env.domaindata['bro']['idtypes'][idname] = self.objtype
def add_target_and_index(self, name, sig, signode):
targetname = self.objtype + '-' + name
if targetname not in self.state.document.ids:
@ -29,9 +82,6 @@ class BroGeneric(ObjectDescription):
objects = self.env.domaindata['bro']['objects']
key = (self.objtype, name)
# this is commented out mostly just to avoid having a special directive
# for events in order to avoid the duplicate warnings in that case
"""
if key in objects:
self.env.warn(self.env.docname,
'duplicate description of %s %s, ' %
@ -39,8 +89,9 @@ class BroGeneric(ObjectDescription):
'other instance in ' +
self.env.doc2path(objects[key]),
self.lineno)
"""
objects[key] = self.env.docname
self.update_type_map(name)
indextext = self.get_index_text(self.objtype, name)
if indextext:
self.indexnode['entries'].append(('single', indextext,
@ -65,6 +116,8 @@ class BroNamespace(BroGeneric):
objects = self.env.domaindata['bro']['objects']
key = (self.objtype, name)
objects[key] = self.env.docname
self.update_type_map(name)
indextext = self.get_index_text(self.objtype, name)
self.indexnode['entries'].append(('single', indextext,
targetname, targetname))
@ -91,10 +144,17 @@ class BroEnum(BroGeneric):
objects = self.env.domaindata['bro']['objects']
key = (self.objtype, name)
objects[key] = self.env.docname
self.update_type_map(name)
indextext = self.get_index_text(self.objtype, name)
#self.indexnode['entries'].append(('single', indextext,
# targetname, targetname))
m = sig.split()
if m[1] == "Notice::Type":
if 'notices' not in self.env.domaindata['bro']:
self.env.domaindata['bro']['notices'] = []
self.env.domaindata['bro']['notices'].append(
(m[0], self.env.docname, targetname))
self.indexnode['entries'].append(('single',
"%s (enum values); %s" % (m[1], m[0]),
targetname, targetname))
@ -113,6 +173,26 @@ class BroAttribute(BroGeneric):
def get_index_text(self, objectname, name):
return _('%s (attribute)') % (name)
class BroNotices(Index):
"""
Index subclass to provide the Bro notices index.
"""
name = 'noticeindex'
localname = l_('Bro Notice Index')
shortname = l_('notices')
def generate(self, docnames=None):
content = {}
for n in self.domain.env.domaindata['bro']['notices']:
modname = n[0].split("::")[0]
entries = content.setdefault(modname, [])
entries.append([n[0], 0, n[1], n[2], '', '', ''])
content = sorted(content.iteritems())
return content, False
class BroDomain(Domain):
"""Bro domain."""
name = 'bro'
@ -140,8 +220,13 @@ class BroDomain(Domain):
'id': XRefRole(),
'enum': XRefRole(),
'attr': XRefRole(),
'see': XRefRole(),
}
indices = [
BroNotices,
]
initial_data = {
'objects': {}, # fullname -> docname, objtype
}
@ -154,13 +239,27 @@ class BroDomain(Domain):
def resolve_xref(self, env, fromdocname, builder, typ, target, node,
contnode):
objects = self.data['objects']
objtypes = self.objtypes_for_role(typ)
for objtype in objtypes:
if (objtype, target) in objects:
return make_refnode(builder, fromdocname,
objects[objtype, target],
objtype + '-' + target,
contnode, target + ' ' + objtype)
if typ == "see":
if target not in self.data['idtypes']:
self.env.warn(fromdocname,
'unknown target for ":bro:see:`%s`"' % (target))
return []
objtype = self.data['idtypes'][target]
return make_refnode(builder, fromdocname,
objects[objtype, target],
objtype + '-' + target,
contnode, target + ' ' + objtype)
else:
objtypes = self.objtypes_for_role(typ)
for objtype in objtypes:
if (objtype, target) in objects:
return make_refnode(builder, fromdocname,
objects[objtype, target],
objtype + '-' + target,
contnode, target + ' ' + objtype)
else:
self.env.warn(fromdocname,
'unknown target for ":bro:%s:`%s`"' % (typ, target))
def get_objects(self):
for (typ, name), docname in self.data['objects'].iteritems():

View file

Binary file not shown.

76
doc/ext/bro_lexer/bro.py Normal file
View file

@ -0,0 +1,76 @@
from pygments.lexer import RegexLexer, bygroups, include
from pygments.token import *
__all__ = ["BroLexer"]
class BroLexer(RegexLexer):
name = 'Bro'
aliases = ['bro']
filenames = ['*.bro']
_hex = r'[0-9a-fA-F_]+'
_float = r'((\d*\.?\d+)|(\d+\.?\d*))([eE][-+]?\d+)?'
_h = r'[A-Za-z0-9][-A-Za-z0-9]*'
tokens = {
'root': [
# Whitespace
('^@.*?\n', Comment.Preproc),
(r'#.*?\n', Comment.Single),
(r'\n', Text),
(r'\s+', Text),
(r'\\\n', Text),
# Keywords
(r'(add|alarm|break|case|const|continue|delete|do|else|enum|event'
r'|export|for|function|if|global|local|module|next'
r'|of|print|redef|return|schedule|when|while)\b', Keyword),
(r'(addr|any|bool|count|counter|double|file|int|interval|net'
r'|pattern|port|record|set|string|subnet|table|time|timer'
r'|vector)\b', Keyword.Type),
(r'(T|F)\b', Keyword.Constant),
(r'(&)((?:add|delete|expire)_func|attr|(create|read|write)_expire'
r'|default|raw_output|encrypt|group|log'
r'|mergeable|optional|persistent|priority|redef'
r'|rotate_(?:interval|size)|synchronized)\b', bygroups(Punctuation,
Keyword)),
(r'\s+module\b', Keyword.Namespace),
# Addresses, ports and networks
(r'\d+/(tcp|udp|icmp|unknown)\b', Number),
(r'(\d+\.){3}\d+', Number),
(r'(' + _hex + r'){7}' + _hex, Number),
(r'0x' + _hex + r'(' + _hex + r'|:)*::(' + _hex + r'|:)*', Number),
(r'((\d+|:)(' + _hex + r'|:)*)?::(' + _hex + r'|:)*', Number),
(r'(\d+\.\d+\.|(\d+\.){2}\d+)', Number),
# Hostnames
(_h + r'(\.' + _h + r')+', String),
# Numeric
(_float + r'\s+(day|hr|min|sec|msec|usec)s?\b', Literal.Date),
(r'0[xX]' + _hex, Number.Hex),
(_float, Number.Float),
(r'\d+', Number.Integer),
(r'/', String.Regex, 'regex'),
(r'"', String, 'string'),
# Operators
(r'[!%*/+-:<=>?~|]', Operator),
(r'([-+=&|]{2}|[+-=!><]=)', Operator),
(r'(in|match)\b', Operator.Word),
(r'[{}()\[\]$.,;]', Punctuation),
# Identifier
(r'([_a-zA-Z]\w*)(::)', bygroups(Name, Name.Namespace)),
(r'[a-zA-Z_][a-zA-Z_0-9]*', Name)
],
'string': [
(r'"', String, '#pop'),
(r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
(r'[^\\"\n]+', String),
(r'\\\n', String),
(r'\\', String)
],
'regex': [
(r'/', String.Regex, '#pop'),
(r'\\[\\nt/]', String.Regex), # String.Escape is too intense.
(r'[^\\/\n]+', String.Regex),
(r'\\\n', String.Regex),
(r'\\', String.Regex)
]
}

BIN
doc/ext/bro_lexer/bro.pyc Normal file

Binary file not shown.

180
doc/ext/rst_directive.py Normal file
View file

@ -0,0 +1,180 @@
def setup(app):
pass
# -*- coding: utf-8 -*-
"""
Modified version of the Pygments reStructuredText directive. -Robin
This provides two new directives:
- .. code:: [<format>]
Highlights the following code block according to <format> if
given (e.g., "c", "python", etc.).
- .. console::
Highlights the following code block as a shell session.
For compatibility with the original version, "sourcecode" is
equivalent to "code".
Original comment:
The Pygments reStructuredText directive
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This fragment is a Docutils_ 0.5 directive that renders source code
(to HTML only, currently) via Pygments.
To use it, adjust the options below and copy the code into a module
that you import on initialization. The code then automatically
registers a ``sourcecode`` directive that you can use instead of
normal code blocks like this::
.. sourcecode:: python
My code goes here.
If you want to have different code styles, e.g. one with line numbers
and one without, add formatters with their names in the VARIANTS dict
below. You can invoke them instead of the DEFAULT one by using a
directive option::
.. sourcecode:: python
:linenos:
My code goes here.
Look at the `directive documentation`_ to get all the gory details.
.. _Docutils: http://docutils.sf.net/
.. _directive documentation:
http://docutils.sourceforge.net/docs/howto/rst-directives.html
:copyright: Copyright 2006-2010 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
# Options
# ~~~~~~~
# Set to True if you want inline CSS styles instead of classes
INLINESTYLES = False
from pygments.formatters import HtmlFormatter
class MyHtmlFormatter(HtmlFormatter):
def format_unencoded(self, tokensource, outfile):
# A NOP currently.
new_tokens = []
for (i, piece) in tokensource:
new_tokens += [(i, piece)]
return super(MyHtmlFormatter, self).format_unencoded(new_tokens, outfile)
# The default formatter
DEFAULT = MyHtmlFormatter(noclasses=INLINESTYLES, cssclass="pygments")
# Add name -> formatter pairs for every variant you want to use
VARIANTS = {
# 'linenos': HtmlFormatter(noclasses=INLINESTYLES, linenos=True),
}
import textwrap
from docutils import nodes
from docutils.parsers.rst import directives, Directive
from pygments import highlight
from pygments.lexers import get_lexer_by_name, guess_lexer, TextLexer
from pygments.token import Text, Keyword, Error, Operator, Name
from pygments.filter import Filter
# Ugly hack to register the Bro lexer. I'm sure there's a better way to do it,
# but it's not obvious ...
from bro_lexer.bro import BroLexer
from pygments.lexers._mapping import LEXERS
LEXERS['BroLexer'] = ('bro_lexer.bro', BroLexer.name, BroLexer.aliases, BroLexer.filenames, ())
class Pygments(Directive):
""" Source code syntax hightlighting.
"""
#max_line_length = 68
max_line_length = 0
required_arguments = 0
optional_arguments = 1
final_argument_whitespace = True
option_spec = dict([(key, directives.flag) for key in VARIANTS])
has_content = True
def wrapped_content(self):
content = []
if Console.max_line_length:
for line in self.content:
content += textwrap.wrap(line, Console.max_line_length, subsequent_indent=" ")
else:
content = self.content
return u'\n'.join(content)
def run(self):
self.assert_has_content()
content = self.wrapped_content()
if len(self.arguments) > 0:
try:
lexer = get_lexer_by_name(self.arguments[0])
except (ValueError, IndexError):
# lexer not found, use default.
lexer = TextLexer()
else:
lexer = guess_lexer(content)
# import sys
# print >>sys.stderr, self.arguments, lexer.__class__
# take an arbitrary option if more than one is given
formatter = self.options and VARIANTS[self.options.keys()[0]] or DEFAULT
parsed = highlight(content, lexer, formatter)
return [nodes.raw('', parsed, format='html')]
class MyFilter(Filter):
def filter(self, lexer, stream):
bol = True
for (ttype, value) in stream:
# Color the '>' prompt sign.
if bol and ttype is Text and value == ">":
ttype = Name.Variable.Class # This gives us a nice red.
# Discolor builtin, that can look funny.
if ttype is Name.Builtin:
ttype = Text
bol = value.endswith("\n")
yield (ttype, value)
class Console(Pygments):
required_arguments = 0
optional_arguments = 0
def run(self):
self.assert_has_content()
content = self.wrapped_content()
lexer = get_lexer_by_name("sh")
lexer.add_filter(MyFilter())
parsed = highlight(content, lexer, DEFAULT)
return [nodes.raw('', parsed, format='html')]
directives.register_directive('sourcecode', Pygments)
directives.register_directive('code', Pygments)
directives.register_directive('console', Console)

217
doc/faq.rst Normal file
View file

@ -0,0 +1,217 @@
==========================
Frequently Asked Questions
==========================
.. raw:: html
<div class="faq">
.. contents::
Installation and Configuration
==============================
How do I upgrade to a new version of Bro?
-----------------------------------------
There are two suggested approaches: either install Bro using the same
installation prefix directory as before, or pick a new prefix and copy
local customizations over.
Re-Use Previous Install Prefix
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
If you choose to configure and install Bro with the same prefix
directory as before, local customization and configuration to files in
``$prefix/share/bro/site`` and ``$prefix/etc`` won't be overwritten
(``$prefix`` indicating the root of where Bro was installed). Also, logs
generated at run-time won't be touched by the upgrade. (But making
a backup of local changes before proceeding is still recommended.)
After upgrading, remember to check ``$prefix/share/bro/site`` and
``$prefix/etc`` for ``.example`` files, which indicate that the
distribution's version of the file differs from the local one (which may
include local changes). Review the differences and make adjustments
as necessary (for differences that aren't the result of a local change,
use the new version's).
Pick a New Install Prefix
^^^^^^^^^^^^^^^^^^^^^^^^^
If you want to install the newer version in a different prefix
directory than before, you can just copy local customization and
configuration files from ``$prefix/share/bro/site`` and ``$prefix/etc``
to the new location (``$prefix`` indicating the root of where Bro was
originally installed). Make sure to review the files for differences
before copying and make adjustments as necessary (for differences that
aren't the result of a local change, use the new version's). Of
particular note, the copied version of ``$prefix/etc/broctl.cfg`` is
likely to need changes to the ``SpoolDir`` and ``LogDir`` settings.
How can I tune my operating system for best capture performance?
----------------------------------------------------------------
Here are some pointers to more information:
* Fabian Schneider's research on `high performance packet capture
<http://www.net.t-labs.tu-berlin.de/research/hppc>`_
* `NSMWiki <http://nsmwiki.org/Main_Page>`_ has a page on
*Collecting Data*.
* An `IMC 2010 paper
<http://conferences.sigcomm.org/imc/2010/papers/p206.pdf>`_ by
Lothar Braun et al. evaluates packet capture performance on
commodity hardware.
Are there any gotchas regarding interface configuration for live capture? Or why might I be seeing abnormally large packets much greater than interface MTU?
-------------------------------------------------------------------------------------------------------------------------------------------------------------
Some NICs offload the reassembly of traffic into "superpackets" so that
fewer packets are then passed up the stack (e.g. "TCP segmentation
offload", or "generic segmentation offload"). The result is that the
capturing application will observe packets much larger than the MTU size
of the interface they were captured from and may also interfere with the
maximum packet capture length, ``snaplen``, so it's a good idea to disable
an interface's offloading features.
You can use the ``ethtool`` program on Linux to view and disable
offloading features of an interface. See this page for more explicit
directions:
http://securityonion.blogspot.com/2011/10/when-is-full-packet-capture-not-full.html
What does an error message like ``internal error: NB-DNS error`` mean?
----------------------------------------------------------------------
That often means that DNS is not set up correctly on the system
running Bro. Try verifying from the command line that DNS lookups
work, e.g., ``host www.google.com``.
I am using OpenBSD and having problems installing Bro. What should I do?
-------------------------------------------------------------------------
One potential issue is that the top-level Makefile may not work with
OpenBSD's default make program, in which case you can either install
the ``gmake`` package and use it instead or first change into the
``build/`` directory before doing either ``make`` or ``make install``
such that the CMake-generated Makefiles are used directly.
Generally, please note that we do not regularly test OpenBSD builds.
We appreciate any patches that improve Bro's support for this
platform.
How do BroControl options affect Bro script variables?
------------------------------------------------------
Some (but not all) BroControl options override a corresponding Bro script variable.
For example, setting the BroControl option "LogRotationInterval" will override
the value of the Bro script variable "Log::default_rotation_interval".
See the :doc:`BroControl Documentation <components/broctl/README>` to find out
which BroControl options override Bro script variables, and for more discussion
on site-specific customization.
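
For illustration, here is a minimal sketch of the script-level side of the
example above. The variable name comes from the text above; the interval
value is only an example, and in a BroControl-managed setup the equivalent
setting would normally live in ``broctl.cfg`` rather than in a script:

.. code:: bro

    # Script-level default that the BroControl option
    # "LogRotationInterval" overrides (example value only):
    redef Log::default_rotation_interval = 1 hr;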
Usage
=====
How can I identify backscatter?
-------------------------------
Identifying backscatter via connections labeled as ``OTH`` is not a reliable
way to detect it. Backscatter is, however, visible by interpreting
the contents of the ``history`` field in the ``conn.log`` file. The basic idea
is to watch for connections that never had an initial ``SYN`` but started
instead with a ``SYN-ACK`` or ``RST`` (though this latter generally is just
discarded). Here are some history fields which provide backscatter examples:
``hAFf``, ``r``. Refer to the conn protocol analysis scripts to interpret the
individual character meanings in the history field.
Is there help for understanding Bro's resource consumption?
-----------------------------------------------------------
There are two scripts that collect statistics on resource usage:
``misc/stats.bro`` and ``misc/profiling.bro``. The former is quite
lightweight, while the latter should only be used for debugging.
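
If you want to enable them, loading the scripts from your ``local.bro`` is
usually all that is needed. A minimal sketch, assuming the standard ``@load``
paths for these two scripts:

.. code:: bro

    # Lightweight resource statistics:
    @load misc/stats

    # Much more detailed (and heavier) profiling; enable only while debugging:
    # @load misc/profiling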
How can I capture packets as an unprivileged user?
--------------------------------------------------
Normally, unprivileged users cannot capture packets from a network interface,
which means they would not be able to use Bro to read/analyze live traffic.
However, there are operating system specific ways to enable packet capture
permission for non-root users, which is worth doing in the context of using
Bro to monitor live traffic.
With Linux Capabilities
^^^^^^^^^^^^^^^^^^^^^^^
Fully implemented since Linux kernel 2.6.24, capabilities are a way of
parceling superuser privileges into distinct units. Attach capabilities
required to capture packets to the ``bro`` executable file like this:
.. console::
sudo setcap cap_net_raw,cap_net_admin=eip /path/to/bro
Now any unprivileged user should have the capability to capture packets
using Bro provided that they have the traditional file permissions to
read/execute the ``bro`` binary.
With BPF Devices
^^^^^^^^^^^^^^^^
Systems using Berkeley Packet Filter (BPF) (e.g. FreeBSD & Mac OS X)
can allow users with read access to a BPF device to capture packets from
it using libpcap.
* Example of manually changing BPF device permissions to allow users in
the ``admin`` group to capture packets:
.. console::
sudo chgrp admin /dev/bpf*
sudo chmod g+r /dev/bpf*
* Example of configuring devfs to set permissions of BPF devices, adding
entries to ``/etc/devfs.conf`` to grant ``admin`` group permission to
capture packets:
.. console::
sudo sh -c 'echo "own bpf root:admin" >> /etc/devfs.conf'
sudo sh -c 'echo "perm bpf 0640" >> /etc/devfs.conf'
sudo service devfs restart
.. note:: As of Mac OS X 10.6, the BPF device is on devfs, but the version of
devfs used isn't capable of setting the device permissions. The permissions
can be changed manually, but they will not survive a reboot.
Why isn't Bro producing the logs I expect? (A Note About Checksums)
-------------------------------------------------------------------
Normally, Bro's event engine will discard packets which don't have valid
checksums. This can be a problem if one wants to analyze locally
generated/captured traffic on a system that offloads checksumming to the
network adapter. In that case, all transmitted/captured packets will have
bad checksums because they haven't yet been calculated by the NIC, thus
such packets will not undergo analysis defined in Bro policy scripts as they
normally would. Bad checksums in traces may also be a result of some packet
alteration tools.
Bro has two options to work around such situations and ignore bad checksums:
1) The ``-C`` command line option to ``bro``.
2) An option called ``ignore_checksums`` that can be redefined at the
policy script layer (e.g. in your ``$PREFIX/share/bro/site/local.bro``):
.. code:: bro
redef ignore_checksums = T;
The other alternative is to disable checksum offloading for your
network adapter, but this is not always possible or desirable.
.. raw:: html
</div>

View file

@ -3,7 +3,7 @@
GeoLocation
===========
.. class:: opening
.. rst-class:: opening
During the process of creating policy scripts the need may arise
to find the geographic location for an IP address. Bro has support

View file

Binary image file changed; not shown (35 KiB before and after).

View file

@ -1,50 +1,90 @@
.. Bro documentation master file
=================
Bro Documentation
=================
`Getting Started <{{git('bro:doc/quickstart.rst')}}>`_
A quick introduction into using Bro 2.x.
Guides
------
`Bro 1.5 to 2.0 Upgrade Guide <{{git('bro:doc/upgrade.rst')}}>`_
Guidelines and notes about upgrading from Bro 1.5 to 2.x. Lots of
things have changed, so make sure to read this when upgrading.
.. toctree::
:maxdepth: 1
`BroControl <{{git('broctl:doc/broctl.rst')}}>`_
An interactive console for managing Bro installations.
`Script Reference <{{autodoc_bro_scripts}}/index.html>`_
A complete reference of all policy scripts shipped with Bro.
`FAQ <{{docroot}}/documentation/faq.html>`_
A list with frequently asked questions.
`How to Report a Problem <{{docroot}}/documentation/reporting-problems.html>`_
Some advice for when you see Bro doing something you believe it
shouldn't.
INSTALL
quickstart
upgrade
faq
reporting-problems
Frameworks
----------
Bro comes with a number of frameworks, some of which are described in
more detail here:
.. toctree::
:maxdepth: 1
`Notice <{{git('bro:doc/notice.rst')}}>`_
The notice framework.
`Logging <{{git('bro:doc/logging.rst')}}>`_
Customizing and extensing Bro's logging.
`Cluster <{{git('bro:doc/cluster.rst')}}>`_
Setting up a Bro Cluster when a single box can't handle the traffic anymore.
`Signatures <{{git('bro:doc/signatures.rst')}}>`_
Bro has support for traditional NIDS signatures as well.
notice
logging
input
cluster
signatures
How-Tos
-------
We also collect more specific How-Tos on specific topics:
.. toctree::
:maxdepth: 1
`Using GeoIP in Bro scripts <{{git('bro:doc/geoip.rst')}}>`_
Installation and usage of the the GeoIP library.
geoip
Script Reference
----------------
.. toctree::
:maxdepth: 1
scripts/packages
scripts/index
scripts/builtins
scripts/bifs
Other Bro Components
--------------------
The following are snapshots of documentation for components that come
with this version of Bro (|version|). Since they can also be used
independently, see the `download page
<http://bro-ids.org/download/index.html>`_ for documentation of any
current, independent component releases.
.. toctree::
:maxdepth: 1
BinPAC - A protocol parser generator <components/binpac/README>
Broccoli - The Bro Client Communication Library (README) <components/broccoli/README>
Broccoli - User Manual <components/broccoli/broccoli-manual>
Broccoli Python Bindings <components/broccoli-python/README>
Broccoli Ruby Bindings <components/broccoli-ruby/README>
BroControl - Interactive Bro management shell <components/broctl/README>
Bro-Aux - Small auxiliary tools for Bro <components/bro-aux/README>
BTest - A unit testing framework <components/btest/README>
Capstats - Command-line packet statistic tool <components/capstats/README>
PySubnetTree - Python module for CIDR lookups <components/pysubnettree/README>
trace-summary - Script for generating break-downs of network traffic <components/trace-summary/README>
The `Broccoli API Reference <broccoli-api/index.html>`_ may also be of
interest.
Other Indices and References
----------------------------
* :ref:`General Index <genindex>`
* `Notice Index <bro-noticeindex.html>`_
* :ref:`search`
Internal References
-------------------
.. toctree::
:maxdepth: 1
scripts/internal

407
doc/input.rst Normal file
View file

@ -0,0 +1,407 @@
==============================================
Loading Data into Bro with the Input Framework
==============================================
.. rst-class:: opening
Bro now features a flexible input framework that allows users
to import data into Bro. Data is either read into Bro tables or
converted to events which can then be handled by scripts.
This document gives an overview of how to use the input framework
with some examples. For more complex scenarios it is
worthwhile to take a look at the unit tests in
``testing/btest/scripts/base/frameworks/input/``.
.. contents::
Reading Data into Tables
========================
Probably the most interesting use-case of the input framework is to
read data into a Bro table.
By default, the input framework reads the data in the same format
as it is written by the logging framework in Bro - a tab-separated
ASCII file.
We will show the ways to read files into Bro with a simple example.
For this example we assume that we want to import data from a blacklist
that contains server IP addresses as well as the timestamp and the reason
for the block.
An example input file could look like this:
::
#fields ip timestamp reason
192.168.17.1 1333252748 Malware host
192.168.27.2 1330235733 Botnet server
192.168.250.3 1333145108 Virus detected
To read a file into a Bro table, two record types have to be defined.
One contains the types and names of the columns that should constitute the
table keys and the second contains the types and names of the columns that
should constitute the table values.
In our case, we want to be able to lookup IPs. Hence, our key record
only contains the server IP. All other elements should be stored as
the table content.
The two records are defined as:
.. code:: bro
type Idx: record {
ip: addr;
};
type Val: record {
timestamp: time;
reason: string;
};
Note that the names of the fields in the record definitions have to correspond
to the column names listed in the '#fields' line of the log file, in this
case 'ip', 'timestamp', and 'reason'.
The log file is read into the table with a simple call of the ``add_table``
function:
.. code:: bro
global blacklist: table[addr] of Val = table();
Input::add_table([$source="blacklist.file", $name="blacklist", $idx=Idx, $val=Val, $destination=blacklist]);
Input::remove("blacklist");
With these three lines we first create an empty table that should contain the
blacklist data and then instruct the input framework to open an input stream
named ``blacklist`` to read the data into the table. The third line removes the
input stream again, because we do not need it any more after the data has been
read.
Because some data files can potentially be rather big, the input framework
works asynchronously. A new thread is created for each new input stream.
This thread opens the input data file, converts the data into a Bro format and
sends it back to the main Bro thread.
Because of this, the data is not immediately accessible. Depending on the
size of the data source it might take from a few milliseconds up to a few
seconds until all data is present in the table. Please note that this means
that when Bro is running without an input source or on very short captured
files, it might terminate before the data is present in the system (because
Bro already handled all packets before the import thread finished).
Subsequent calls to an input source are queued until the previous action has
been completed. Because of this, it is, for example, possible to call
``add_table`` and ``remove`` in two subsequent lines: the ``remove`` action
will remain queued until the first read has been completed.
Once the input framework finishes reading from a data source, it fires
the ``end_of_data`` event. Once this event has been received all data
from the input file is available in the table.
.. code:: bro
event Input::end_of_data(name: string, source: string) {
# now all data is in the table
print blacklist;
}
The table can also be used while the data is still being read - it
just might not contain all lines in the input file when the event has not
yet fired. After it has been populated it can be used like any other Bro
table and blacklist entries can easily be tested:
.. code:: bro
if ( 192.168.18.12 in blacklist )
# take action
Re-reading and streaming data
-----------------------------
For many data sources, such as blacklists, the source data is continually
changing. For these cases, the Bro input framework supports several ways to
deal with changing data files.
The first, very basic method is an explicit refresh of an input stream. When
an input stream is open, the function ``force_update`` can be called. This
will trigger a complete refresh of the table; any changed elements from the
file will be updated. After the update is finished the ``end_of_data``
event will be raised.
In our example the call would look like:
.. code:: bro
Input::force_update("blacklist");
The input framework also supports two automatic refresh modes. The first mode
continually checks if a file has been changed. If the file has been changed, it
is re-read and the data in the Bro table is updated to reflect the current
state. Each time a change has been detected and all the new data has been
read into the table, the ``end_of_data`` event is raised.
The second mode is a streaming mode. This mode assumes that the source data
file is an append-only file to which new data is continually appended. Bro
continually checks for new data at the end of the file and will add the new
data to the table. If newer lines in the file have the same index as previous
lines, they will overwrite the values in the output table. Because of the
nature of streaming reads (data is continually added to the table),
the ``end_of_data`` event is never raised when using streaming reads.
The reading mode can be selected by setting the ``mode`` option of the
add_table call. Valid values are ``MANUAL`` (the default), ``REREAD``
and ``STREAM``.
Hence, when adding ``$mode=Input::REREAD`` to the previous example, the
blacklist table will always reflect the state of the blacklist input file.
.. code:: bro
Input::add_table([$source="blacklist.file", $name="blacklist", $idx=Idx, $val=Val, $destination=blacklist, $mode=Input::REREAD]);
Receiving change events
-----------------------
When re-reading files, it might be interesting to know exactly which lines in
the source files have changed.
For this reason, the input framework can raise an event each time when a data
item is added to, removed from or changed in a table.
The event definition looks like this:
.. code:: bro
event entry(description: Input::TableDescription, tpe: Input::Event, left: Idx, right: Val) {
# act on values
}
The event has to be specified in ``$ev`` in the ``add_table`` call:
.. code:: bro
Input::add_table([$source="blacklist.file", $name="blacklist", $idx=Idx, $val=Val, $destination=blacklist, $mode=Input::REREAD, $ev=entry]);
The ``description`` field of the event contains the arguments that were
originally supplied to the add_table call. Hence, the name of the stream can,
for example, be accessed with ``description$name``. ``tpe`` is an enum
containing the type of the change that occurred.
If a line that was not previously present in the table has been added,
then ``tpe`` will contain ``Input::EVENT_NEW``. In this case ``left`` contains
the index of the added table entry and ``right`` contains the values of the
added entry.
If a table entry that already was present is altered during the re-reading or
streaming read of a file, ``tpe`` will contain ``Input::EVENT_CHANGED``. In
this case ``left`` contains the index of the changed table entry and ``right``
contains the values of the entry before the change. The reason for this is
that the table already has been updated when the event is raised. The current
value in the table can be ascertained by looking up the current table value.
Hence it is possible to compare the new and the old values of the table.
If a table element is removed because it was no longer present during a
re-read, then ``tpe`` will contain ``Input::REMOVED``. In this case ``left``
contains the index and ``right`` the values of the removed element.
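
As a sketch of how an event handler might distinguish these cases (the handler
body is illustrative only; the types and the event signature are the ones shown
above):

.. code:: bro

    event entry(description: Input::TableDescription, tpe: Input::Event, left: Idx, right: Val) {
        if ( tpe == Input::EVENT_NEW ) {
            print fmt("new blacklist entry for %s", left$ip);
        }
        else if ( tpe == Input::EVENT_CHANGED ) {
            # right holds the old values; the table already contains the new ones.
            print fmt("entry for %s changed", left$ip);
        }
        else {
            # The element was removed during a re-read.
            print fmt("entry for %s removed", left$ip);
        }
    }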
Filtering data during import
----------------------------
The input framework also allows a user to filter the data during the import.
To this end, predicate functions are used. A predicate function is called
before a new element is added/changed/removed from a table. The predicate
can either accept or veto the change by returning true for an accepted
change and false for a rejected change. Furthermore, it can alter the data
before it is written to the table.
The following example filter will refuse to add entries to the table if
they were generated more than a month ago. It will accept all changes and all
removals of values that are already present in the table.
.. code:: bro
Input::add_table([$source="blacklist.file", $name="blacklist", $idx=Idx, $val=Val, $destination=blacklist, $mode=Input::REREAD,
$pred(typ: Input::Event, left: Idx, right: Val) = {
if ( typ != Input::EVENT_NEW ) {
return T;
}
return ( ( current_time() - right$timestamp ) < (30 day) );
}]);
To change elements while they are being imported, the predicate function can
manipulate ``left`` and ``right``. Note that predicate functions are called
before the change is committed to the table. Hence, when a table element is
changed (``tpe`` is ``Input::EVENT_CHANGED``), ``left`` and ``right``
contain the new values, but the destination (``blacklist`` in our example)
still contains the old values. This allows predicate functions to examine
the changes between the old and the new version before deciding if they
should be allowed.
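
As a sketch of such a modification (the normalization applied here is just an
example), the predicate can rewrite a field of ``right`` before the entry is
committed:

.. code:: bro

    Input::add_table([$source="blacklist.file", $name="blacklist",
                      $idx=Idx, $val=Val, $destination=blacklist,
                      $mode=Input::REREAD,
                      $pred(typ: Input::Event, left: Idx, right: Val) = {
                          # Normalize the free-text reason before it enters the table.
                          right$reason = to_lower(right$reason);
                          return T;
                      }]);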
Different readers
-----------------
The input framework supports different kinds of readers for different kinds
of source data files. At the moment, the default reader reads ASCII files
formatted in the Bro log file format (tab-separated values). At the moment,
Bro comes with two other readers. The ``RAW`` reader reads a file that is
split by a specified record separator (usually newline). The contents are
returned line-by-line as strings; it can, for example, be used to read
configuration files and the like and is probably
only useful in the event mode and not for reading data to tables.
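
As a rough sketch of using the ``RAW`` reader in event mode (the file name,
record type and event name are made up for illustration, and the reader enum
is assumed to follow the ``READER_ASCII`` naming used in the options below):

.. code:: bro

    type OneLine: record {
        s: string;
    };

    event config_line(description: Input::EventDescription, tpe: Input::Event, s: string) {
        # Each line of the file arrives here as one string.
        print s;
    }

    event bro_init() {
        Input::add_event([$source="myconfig.file", $name="config",
                          $reader=Input::READER_RAW,
                          $fields=OneLine, $ev=config_line]);
    }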
Another included reader is the ``BENCHMARK`` reader, which is being used
to optimize the speed of the input framework. It can generate arbitrary
amounts of semi-random data in all Bro data types supported by the input
framework.
In the future, the input framework will get support for new data sources
like, for example, different databases.
Add_table options
-----------------
This section lists all possible options that can be used for the add_table
function and gives a short explanation of their use. Most of the options
have already been discussed in the previous sections.
The possible fields that can be set for a table stream are:
``source``
A mandatory string identifying the source of the data.
For the ASCII reader this is the filename.
``name``
A mandatory name for the filter that can later be used
to manipulate it further.
``idx``
Record type that defines the index of the table.
``val``
Record type that defines the values of the table.
``reader``
The reader used for this stream. Default is ``READER_ASCII``.
``mode``
The mode in which the stream is opened. Possible values are
``MANUAL``, ``REREAD`` and ``STREAM``. Default is ``MANUAL``.
``MANUAL`` means that the file is not updated after it has
been read. Changes to the file will not be reflected in the
data Bro knows. ``REREAD`` means that the whole file is read
again each time a change is found. This should be used for
files that are mapped to a table where individual lines can
change. ``STREAM`` means that the data from the file is
streamed. Events / table entries will be generated as new
data is appended to the file.
``destination``
The destination table.
``ev``
Optional event that is raised, when values are added to,
changed in, or deleted from the table. Events are passed an
Input::Event description as the first argument, the index
record as the second argument and the values as the third
argument.
``pred``
Optional predicate that can prevent entries from being added
to the table and events from being sent.
``want_record``
Boolean value that defines whether the event wants to receive the
fields inside of a single record value, or individually
(default). This can be used if ``val`` is a record
containing only one type. In this case, if ``want_record`` is
set to false, the table will contain elements of the type
contained in ``val``. (See the sketch below.)
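
A sketch of what that looks like in practice (types and names invented for
illustration): if ``Val`` has a single field, the destination can be a plain
table of that field's type.

.. code:: bro

    type Idx: record {
        ip: addr;
    };

    # Only one field in the value record ...
    type Val: record {
        reason: string;
    };

    # ... so with $want_record=F the table holds plain strings.
    global blacklist: table[addr] of string = table();

    event bro_init() {
        Input::add_table([$source="blacklist.file", $name="blacklist",
                          $idx=Idx, $val=Val, $destination=blacklist,
                          $want_record=F]);
    }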
Reading Data to Events
======================
The second supported mode of the input framework is reading data to Bro
events instead of reading them to a table using event streams.
Event streams work very similarly to table streams that were already
discussed in much detail. To read the blacklist of the previous example
into an event stream, the following Bro code could be used:
.. code:: bro
type Val: record {
ip: addr;
timestamp: time;
reason: string;
};
event blacklistentry(description: Input::EventDescription, tpe: Input::Event, ip: addr, timestamp: time, reason: string) {
# work with event data
}
event bro_init() {
Input::add_event([$source="blacklist.file", $name="blacklist", $fields=Val, $ev=blacklistentry]);
}
The main difference in the declaration of an event stream is that it
needs no separate index and value declarations; instead, all source
data types are provided in a single record definition.
Apart from this, event streams work exactly the same as table streams and
support most of the options that are also supported for table streams.
The options that can be set when creating an event stream with
``add_event`` are:
``source``
A mandatory string identifying the source of the data.
For the ASCII reader this is the filename.
``name``
A mandatory name for the stream that can later be used
to remove it.
``fields``
Name of a record type containing the fields that should be
retrieved from the input stream.
``ev``
The event that is fired after a line has been read from the
input source. As shown in the example above, the event receives
an Input::EventDescription record as its first argument and an
Input::Event value as its second argument, followed by the data,
either inside of a record (if ``want_record`` is set) or as
individual fields. The Input::Event value indicates whether the
received line is ``NEW``, has been ``CHANGED``, or has been
``DELETED``. Since the ASCII reader cannot track this information
for event streams, the value is always ``NEW`` at the moment.
``mode``
The mode in which the stream is opened. Possible values are
``MANUAL``, ``REREAD`` and ``STREAM``. Default is ``MANUAL``.
``MANUAL`` means that the file is not updated after it has
been read. Changes to the file will not be reflected in the
data Bro knows. ``REREAD`` means that the whole file is read
again each time a change is found. This should be used for
files that are mapped to a table where individual lines can
change. ``STREAM`` means that the data from the file is
streamed. Events / table entries will be generated as new
data is appended to the file.
``reader``
The reader used for this stream. Default is ``READER_ASCII``.
``want_record``
Boolean value that defines whether the event wants to receive the
fields inside of a single record value or individually
(default). If this is set to true, the event will receive a
single record of the type provided in ``fields``.
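For example, a variant of the sketch above with ``want_record`` set to true
would receive the whole ``Val`` record as a single argument instead of the
individual fields:

.. code:: bro

    event blacklistentry(description: Input::EventDescription, tpe: Input::Event, r: Val) {
        # work with r$ip, r$timestamp and r$reason
    }

    event bro_init() {
        Input::add_event([$source="blacklist.file", $name="blacklist",
                          $fields=Val, $ev=blacklistentry, $want_record=T]);
    }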

doc/logging-dataseries.rst Normal file

@ -0,0 +1,186 @@
=============================
Binary Output with DataSeries
=============================
.. rst-class:: opening
Bro's default ASCII log format is not exactly the most efficient
way for storing and searching large volumes of data. As an
alternative, Bro comes with experimental support for `DataSeries
<http://www.hpl.hp.com/techreports/2009/HPL-2009-323.html>`_
output, an efficient binary format for recording structured bulk
data. DataSeries is developed and maintained at HP Labs.
.. contents::
Installing DataSeries
---------------------
To use DataSeries, its libraries must be available at compile-time,
along with the supporting *Lintel* package. Generally, both are
distributed on `HP Labs' web site
<http://tesla.hpl.hp.com/opensource/>`_. Currently, however, you need
to use recent development versions for both packages, which you can
download from github like this::
git clone http://github.com/dataseries/Lintel
git clone http://github.com/dataseries/DataSeries
To build and install the two into ``<prefix>``, do::
( cd Lintel && mkdir build && cd build && cmake -DCMAKE_INSTALL_PREFIX=<prefix> .. && make && make install )
( cd DataSeries && mkdir build && cd build && cmake -DCMAKE_INSTALL_PREFIX=<prefix> .. && make && make install )
Please refer to the packages' documentation for more information about
the installation process. In particular, there's more information on
required and optional `dependencies for Lintel
<https://raw.github.com/dataseries/Lintel/master/doc/dependencies.txt>`_
and `dependencies for DataSeries
<https://raw.github.com/dataseries/DataSeries/master/doc/dependencies.txt>`_.
On RedHat-style systems, you'll need the following::
yum install libxml2-devel boost-devel
Compiling Bro with DataSeries Support
-------------------------------------
Once you have installed DataSeries, Bro's ``configure`` should pick it
up automatically as long as it finds it in a standard system location.
Alternatively, you can specify the DataSeries installation prefix
manually with ``--with-dataseries=<prefix>``. Keep an eye on
``configure``'s summary output; if it looks like the following, Bro
found DataSeries and will compile in the support::
# ./configure --with-dataseries=/usr/local
[...]
====================| Bro Build Summary |=====================
[...]
DataSeries: true
[...]
================================================================
Activating DataSeries
---------------------
The direct way to use DataSeries is to switch *all* log files over to
the binary format. To do that, just add ``redef
Log::default_writer=Log::WRITER_DATASERIES;`` to your ``local.bro``.
For testing, you can also just pass that on the command line::
bro -r trace.pcap Log::default_writer=Log::WRITER_DATASERIES
With that, Bro will now write all its output into DataSeries files
``*.ds``. You can inspect these using DataSeries's set of command line
tools, which its installation process installs into ``<prefix>/bin``.
For example, to convert a file back into an ASCII representation::
$ ds2txt conn.ds
[... We skip a bunch of metadata here ...]
ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes
1300475167.096535 CRCC5OdDlXe 141.142.220.202 5353 224.0.0.251 5353 udp dns 0.000000 0 0 S0 F 0 D 1 73 0 0
1300475167.097012 o7XBsfvo3U1 fe80::217:f2ff:fed7:cf65 5353 ff02::fb 5353 udp 0.000000 0 0 S0 F 0 D 1 199 0 0
1300475167.099816 pXPi1kPMgxb 141.142.220.50 5353 224.0.0.251 5353 udp 0.000000 0 0 S0 F 0 D 1 179 0 0
1300475168.853899 R7sOc16woCj 141.142.220.118 43927 141.142.2.2 53 udp dns 0.000435 38 89 SF F 0 Dd 1 66 1 117
1300475168.854378 Z6dfHVmt0X7 141.142.220.118 37676 141.142.2.2 53 udp dns 0.000420 52 99 SF F 0 Dd 1 80 1 127
1300475168.854837 k6T92WxgNAh 141.142.220.118 40526 141.142.2.2 53 udp dns 0.000392 38 183 SF F 0 Dd 1 66 1 211
[...]
(``--skip-all`` suppresses the metadata.)
Note that the ASCII conversion is *not* equivalent to Bro's default
output format.
You can also switch only individual files over to DataSeries by adding
code like this to your ``local.bro``:
.. code:: bro

    event bro_init()
        {
        local f = Log::get_filter(Conn::LOG, "default");  # Get default filter for connection log.
        f$writer = Log::WRITER_DATASERIES;                 # Change writer type.
        Log::add_filter(Conn::LOG, f);                     # Replace filter with adapted version.
        }
Bro's DataSeries writer comes with a few tuning options, see
:doc:`scripts/base/frameworks/logging/writers/dataseries`.
Working with DataSeries
=======================
Here are a few examples of using DataSeries command line tools to work
with the output files.
* Printing CSV::
$ ds2txt --csv conn.ds
ts,uid,id.orig_h,id.orig_p,id.resp_h,id.resp_p,proto,service,duration,orig_bytes,resp_bytes,conn_state,local_orig,missed_bytes,history,orig_pkts,orig_ip_bytes,resp_pkts,resp_ip_bytes
1258790493.773208,ZTtgbHvf4s3,192.168.1.104,137,192.168.1.255,137,udp,dns,3.748891,350,0,S0,F,0,D,7,546,0,0
1258790451.402091,pOY6Rw7lhUd,192.168.1.106,138,192.168.1.255,138,udp,,0.000000,0,0,S0,F,0,D,1,229,0,0
1258790493.787448,pn5IiEslca9,192.168.1.104,138,192.168.1.255,138,udp,,2.243339,348,0,S0,F,0,D,2,404,0,0
1258790615.268111,D9slyIu3hFj,192.168.1.106,137,192.168.1.255,137,udp,dns,3.764626,350,0,S0,F,0,D,7,546,0,0
[...]
Add ``--separator=X`` to set a different separator.
* Extracting a subset of columns::
$ ds2txt --select '*' ts,id.resp_h,id.resp_p --skip-all conn.ds
1258790493.773208 192.168.1.255 137
1258790451.402091 192.168.1.255 138
1258790493.787448 192.168.1.255 138
1258790615.268111 192.168.1.255 137
1258790615.289842 192.168.1.255 138
[...]
* Filtering rows::
$ ds2txt --where '*' 'duration > 5 && id.resp_p > 1024' --skip-all conn.ds
1258790631.532888 V8mV5WLITu5 192.168.1.105 55890 239.255.255.250 1900 udp 15.004568 798 0 S0 F 0 D 6 966 0 0
1258792413.439596 tMcWVWQptvd 192.168.1.105 55890 239.255.255.250 1900 udp 15.004581 798 0 S0 F 0 D 6 966 0 0
1258794195.346127 cQwQMRdBrKa 192.168.1.105 55890 239.255.255.250 1900 udp 15.005071 798 0 S0 F 0 D 6 966 0 0
1258795977.253200 i8TEjhWd2W8 192.168.1.105 55890 239.255.255.250 1900 udp 15.004824 798 0 S0 F 0 D 6 966 0 0
1258797759.160217 MsLsBA8Ia49 192.168.1.105 55890 239.255.255.250 1900 udp 15.005078 798 0 S0 F 0 D 6 966 0 0
1258799541.068452 TsOxRWJRGwf 192.168.1.105 55890 239.255.255.250 1900 udp 15.004082 798 0 S0 F 0 D 6 966 0 0
[...]
* Calculating some statistics:
Mean/stddev/min/max over a column::
$ dsstatgroupby '*' basic duration from conn.ds
# Begin DSStatGroupByModule
# processed 2159 rows, where clause eliminated 0 rows
# count(*), mean(duration), stddev, min, max
2159, 42.7938, 1858.34, 0, 86370
[...]
Quantiles of total connection volume::
$ dsstatgroupby '*' quantile 'orig_bytes + resp_bytes' from conn.ds
[...]
2159 data points, mean 24616 +- 343295 [0,1.26615e+07]
quantiles about every 216 data points:
10%: 0, 124, 317, 348, 350, 350, 601, 798, 1469
tails: 90%: 1469, 95%: 7302, 99%: 242629, 99.5%: 1226262
[...]
The ``man`` pages for these tools show further options, and their
``-h`` option gives some more information (unfortunately, either can be
a bit cryptic).
Deficiencies
------------
Due to limitations of the DataSeries format, one cannot inspect its
files before they have been fully written. In other words, when using
DataSeries, it's currently not possible to inspect the live log
files inside the spool directory before they are rotated to their
final location. It seems that this could be fixed with some effort,
and we will work with the DataSeries development team on that if the
format gains traction among Bro users.
Likewise, we're considering writing custom command line tools for
interacting with DataSeries files, making that a bit more convenient
than what the standard utilities provide.


@ -0,0 +1,89 @@
=========================================
Indexed Logging Output with ElasticSearch
=========================================
.. rst-class:: opening
Bro's default ASCII log format is not exactly the most efficient
way for searching large volumes of data. ElasticSearch
is a new data storage technology for dealing with tons of data.
It's also a search engine built on top of Apache's Lucene
project. It scales very well, both for distributed indexing and
distributed searching.
.. contents::
Warning
-------
This writer plugin is still in testing and is not yet recommended for
production use! The approach to how logs are handled in the plugin is "fire
and forget" at this time, there is no error handling if the server fails to
respond successfully to the insertion request.
Installing ElasticSearch
------------------------
Download the latest version from: <http://www.elasticsearch.org/download/>.
Once extracted, start ElasticSearch with::
# ./bin/elasticsearch
For more detailed information, refer to the ElasticSearch installation
documentation: http://www.elasticsearch.org/guide/reference/setup/installation.html
Compiling Bro with ElasticSearch Support
----------------------------------------
First, ensure that you have libcurl installed, then run ``configure``::
# ./configure
[...]
====================| Bro Build Summary |=====================
[...]
cURL: true
[...]
ElasticSearch: true
[...]
================================================================
Activating ElasticSearch
------------------------
The easiest way to enable ElasticSearch output is to load the
``tuning/logs-to-elasticsearch.bro`` script. If you are using BroControl, the
following line in ``local.bro`` will enable it.
.. console::
@load tuning/logs-to-elasticsearch
With that, Bro will now write most of its logs into ElasticSearch in addition
to maintaining the ASCII logs as it would by default. That script has
some tunable options for choosing which logs to send to ElasticSearch; refer
to the autogenerated script documentation for those options.
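If you would rather send only selected log streams to ElasticSearch, a filter
can be added by hand, analogous to the per-stream DataSeries example elsewhere
in this documentation. The following is only a sketch; it uses the HTTP log as
an example stream and adds the ElasticSearch writer under a new filter name so
that the default ASCII output stays in place:

.. code:: bro

    event bro_init()
        {
        # A second filter on the HTTP log that sends entries to ElasticSearch;
        # the default ASCII filter remains untouched.
        local f: Log::Filter = [$name="es_http",
                                $writer=Log::WRITER_ELASTICSEARCH];
        Log::add_filter(HTTP::LOG, f);
        }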
An interface named Brownian is being written specifically to integrate with
the data that Bro outputs into ElasticSearch. It can be found here::
https://github.com/grigorescu/Brownian
Tuning
------
A common problem encountered with ElasticSearch is too many files being held
open. The ElasticSearch website has some suggestions on how to increase the
open file limit.
- http://www.elasticsearch.org/tutorials/2011/04/06/too-many-open-files.html
TODO
----
Lots.
- Perform multicast discovery for server.
- Better error detection.
- Better defaults (don't index loaded-plugins, for instance).


@ -2,7 +2,7 @@
Customizing Bro's Logging
==========================
.. class:: opening
.. rst-class:: opening
Bro comes with a flexible key-value based logging interface that
allows fine-grained control of what gets logged and how it is
@ -43,13 +43,14 @@ Basics
======
The data fields that a stream records are defined by a record type
specified when it is created. Let's look at the script generating
Bro's connection summaries as an example,
``base/protocols/conn/main.bro``. It defines a record ``Conn::Info``
that lists all the fields that go into ``conn.log``, each marked with
a ``&log`` attribute indicating that it is part of the information
written out. To write a log record, the script then passes an instance
of ``Conn::Info`` to the logging framework's ``Log::write`` function.
specified when it is created. Let's look at the script generating Bro's
connection summaries as an example,
:doc:`scripts/base/protocols/conn/main`. It defines a record
:bro:type:`Conn::Info` that lists all the fields that go into
``conn.log``, each marked with a ``&log`` attribute indicating that it
is part of the information written out. To write a log record, the
script then passes an instance of :bro:type:`Conn::Info` to the logging
framework's :bro:id:`Log::write` function.
By default, each stream automatically gets a filter named ``default``
that generates the normal output by recording all record fields into a
@ -62,11 +63,11 @@ to work with.
Filtering
---------
To create new a new output file for an existing stream, you can add a
To create a new output file for an existing stream, you can add a
new filter. A filter can, e.g., restrict the set of fields being
logged:
.. code:: bro:
.. code:: bro
event bro_init()
{
@ -85,14 +86,15 @@ Note the fields that are set for the filter:
``path``
The filename for the output file, without any extension (which
may be automatically added by the writer). Default path values
are generated by taking the stream's ID and munging it
slightly. ``Conn::LOG`` is converted into ``conn``,
``PacketFilter::LOG`` is converted into ``packet_filter``, and
``Notice::POLICY_LOG`` is converted into ``notice_policy``.
are generated by taking the stream's ID and munging it slightly.
:bro:enum:`Conn::LOG` is converted into ``conn``,
:bro:enum:`PacketFilter::LOG` is converted into
``packet_filter``, and :bro:enum:`Notice::POLICY_LOG` is
converted into ``notice_policy``.
``include``
A set limiting the fields to the ones given. The names
correspond to those in the ``Conn::LOG`` record, with
correspond to those in the :bro:type:`Conn::Info` record, with
sub-records unrolled by concatenating fields (separated with
dots).
@ -158,10 +160,10 @@ further for example to log information by subnets or even by IP
address. Be careful, however, as it is easy to create many files very
quickly ...
.. sidebar:
.. sidebar:: A More Generic Path Function
The show ``split_log`` method has one draw-back: it can be used
only with the ``Conn::Log`` stream as the record type is hardcoded
The ``split_log`` method has one draw-back: it can be used
only with the :bro:enum:`Conn::LOG` stream as the record type is hardcoded
into its argument list. However, Bro allows to do a more generic
variant:
@ -193,7 +195,7 @@ predicate that will be called for each log record:
Log::add_filter(Conn::LOG, filter);
}
This will results in a log file ``conn-http.log`` that contains only
This will result in a log file ``conn-http.log`` that contains only
traffic detected and analyzed as HTTP traffic.
Extending
@ -201,8 +203,8 @@ Extending
You can add further fields to a log stream by extending the record
type that defines its content. Let's say we want to add a boolean
field ``is_private`` to ``Conn::Info`` that indicates whether the
originator IP address is part of the RFC1918 space:
field ``is_private`` to :bro:type:`Conn::Info` that indicates whether the
originator IP address is part of the :rfc:`1918` space:
.. code:: bro
@ -234,10 +236,10 @@ Notes:
- For extending logs this way, one needs a bit of knowledge about how
the script that creates the log stream is organizing its state
keeping. Most of the standard Bro scripts attach their log state to
the ``connection`` record where it can then be accessed, just as the
``c$conn`` above. For example, the HTTP analysis adds a field ``http
: HTTP::Info`` to the ``connection`` record. See the script
reference for more information.
the :bro:type:`connection` record where it can then be accessed, just
as the ``c$conn`` above. For example, the HTTP analysis adds a field
``http`` of type :bro:type:`HTTP::Info` to the :bro:type:`connection`
record. See the script reference for more information.
- When extending records as shown above, the new fields must always be
declared either with a ``&default`` value or as ``&optional``.
@ -251,8 +253,8 @@ Sometimes it is helpful to do additional analysis of the information
being logged. For these cases, a stream can specify an event that will
be generated every time a log record is written to it. All of Bro's
default log streams define such an event. For example, the connection
log stream raises the event ``Conn::log_conn(rec: Conn::Info)``: You
could use that for example for flagging when an a connection to
log stream raises the event :bro:id:`Conn::log_conn`. You
could use that for example for flagging when a connection to a
specific destination exceeds a certain duration:
.. code:: bro
@ -267,7 +269,7 @@ specific destination exceeds a certain duration:
{
if ( rec$duration > 5mins )
NOTICE([$note=Long_Conn_Found,
$msg=fmt("unsually long conn to %s", rec$id$resp_h),
$msg=fmt("unusually long conn to %s", rec$id$resp_h),
$id=rec$id]);
}
@ -279,11 +281,32 @@ real-time.
Rotation
--------
By default, no log rotation occurs, but it's globally controllable for all
filters by redefining the :bro:id:`Log::default_rotation_interval` option:
.. code:: bro
redef Log::default_rotation_interval = 1 hr;
Or specifically for certain :bro:type:`Log::Filter` instances by setting
their ``interv`` field. Here's an example of changing just the
:bro:enum:`Conn::LOG` stream's default filter rotation.
.. code:: bro
event bro_init()
{
local f = Log::get_filter(Conn::LOG, "default");
f$interv = 1 min;
Log::remove_filter(Conn::LOG, "default");
Log::add_filter(Conn::LOG, f);
}
ASCII Writer Configuration
--------------------------
The ASCII writer has a number of options for customizing the format of
its output, see XXX.bro.
its output, see :doc:`scripts/base/frameworks/logging/writers/ascii`.
Adding Streams
==============
@ -296,7 +319,7 @@ example for the ``Foo`` module:
module Foo;
export {
# Create an ID for the our new stream. By convention, this is
# Create an ID for our new stream. By convention, this is
# called "LOG".
redef enum Log::ID += { LOG };
@ -321,8 +344,8 @@ example for the ``Foo`` module:
Log::create_stream(Foo::LOG, [$columns=Info, $ev=log_foo]);
}
You can also the state to the ``connection`` record to make it easily
accessible across event handlers:
You can also add the state to the :bro:type:`connection` record to make
it easily accessible across event handlers:
.. code:: bro
@ -330,7 +353,7 @@ accessible across event handlers:
foo: Info &optional;
}
Now you can use the ``Log::write`` method to output log records and
Now you can use the :bro:id:`Log::write` method to output log records and
save the logged ``Foo::Info`` record into the connection record:
.. code:: bro
@ -343,10 +366,21 @@ save the logged ``Foo::Info`` record into the connection record:
}
See the existing scripts for how to work with such a new connection
field. A simple example is ``base/protocols/syslog/main.bro``.
field. A simple example is :doc:`scripts/base/protocols/syslog/main`.
When you are developing scripts that add data to the ``connection``
When you are developing scripts that add data to the :bro:type:`connection`
record, care must be given to when and how long data is stored.
Normally data saved to the connection record will remain there for the
duration of the connection and from a practical perspective it's not
uncommon to need to delete that data before the end of the connection.
Other Writers
-------------
Bro supports the following output formats other than ASCII:
.. toctree::
:maxdepth: 1
logging-dataseries
logging-elasticsearch


@ -2,7 +2,7 @@
Notice Framework
================
.. class:: opening
.. rst-class:: opening
One of the easiest ways to customize Bro is writing a local notice
policy. Bro can detect a large number of potentially interesting
@ -21,7 +21,7 @@ Let's start with a little bit of background on Bro's philosophy on reporting
things. Bro ships with a large number of policy scripts which perform a wide
variety of analyses. Most of these scripts monitor for activity which might be
of interest for the user. However, none of these scripts determines the
importance of what it finds itself. Instead, the scripts only flags situations
importance of what it finds itself. Instead, the scripts only flag situations
as *potentially* interesting, leaving it to the local configuration to define
which of them are in fact actionable. This decoupling of detection and
reporting allows Bro to address the different needs that sites have:
@ -29,17 +29,18 @@ definitions of what constitutes an attack or even a compromise differ quite a
bit between environments, and activity deemed malicious at one site might be
fully acceptable at another.
Whenever one of Bro's analysis scripts sees something potentially interesting
it flags the situation by calling the ``NOTICE`` function and giving it a
single ``Notice::Info`` record. A Notice has a ``Notice::Type``, which
reflects the kind of activity that has been seen, and it is usually also
augmented with further context about the situation.
Whenever one of Bro's analysis scripts sees something potentially
interesting it flags the situation by calling the :bro:see:`NOTICE`
function and giving it a single :bro:see:`Notice::Info` record. A Notice
has a :bro:see:`Notice::Type`, which reflects the kind of activity that
has been seen, and it is usually also augmented with further context
about the situation.
More information about raising notices can be found in the `Raising Notices`_
section.
Once a notice is raised, it can have any number of actions applied to it by
the ``Notice::policy`` set which is described in the `Notice Policy`_
the :bro:see:`Notice::policy` set which is described in the `Notice Policy`_
section below. Such actions can be to send a mail to the configured
address(es) or to simply ignore the notice. Currently, the following actions
are defined:
@ -52,20 +53,20 @@ are defined:
- Description
* - Notice::ACTION_LOG
- Write the notice to the ``Notice::LOG`` logging stream.
- Write the notice to the :bro:see:`Notice::LOG` logging stream.
* - Notice::ACTION_ALARM
- Log into the ``Notice::ALARM_LOG`` stream which will rotate
- Log into the :bro:see:`Notice::ALARM_LOG` stream which will rotate
hourly and email the contents to the email address or addresses
defined in the ``Notice::mail_dest`` variable.
defined in the :bro:see:`Notice::mail_dest` variable.
* - Notice::ACTION_EMAIL
- Send the notice in an email to the email address or addresses given in
the ``Notice::mail_dest`` variable.
the :bro:see:`Notice::mail_dest` variable.
* - Notice::ACTION_PAGE
- Send an email to the email address or addresses given in the
``Notice::mail_page_dest`` variable.
:bro:see:`Notice::mail_page_dest` variable.
* - Notice::ACTION_NO_SUPPRESS
- This action will disable the built in notice suppression for the
@ -82,15 +83,17 @@ Processing Notices
Notice Policy
*************
The predefined set ``Notice::policy`` provides the mechanism for applying
actions and other behavior modifications to notices. Each entry of
``Notice::policy`` is a record of the type ``Notice::PolicyItem`` which
defines a condition to be matched against all raised notices and one or more
of a variety of behavior modifiers. The notice policy is defined by adding any
number of ``Notice::PolicyItem`` records to the ``Notice::policy`` set.
The predefined set :bro:see:`Notice::policy` provides the mechanism for
applying actions and other behavior modifications to notices. Each entry
of :bro:see:`Notice::policy` is a record of the type
:bro:see:`Notice::PolicyItem` which defines a condition to be matched
against all raised notices and one or more of a variety of behavior
modifiers. The notice policy is defined by adding any number of
:bro:see:`Notice::PolicyItem` records to the :bro:see:`Notice::policy`
set.
Here's a simple example which tells Bro to send an email for all notices of
type ``SSH::Login`` if the server is 10.0.0.1:
type :bro:see:`SSH::Login` if the server is 10.0.0.1:
.. code:: bro
@ -113,11 +116,11 @@ flexibility due to having access to Bro's full programming language.
Predicate Field
^^^^^^^^^^^^^^^
The ``Notice::PolicyItem`` record type has a field name ``$pred`` which
defines the entry's condition in the form of a predicate written as a Bro
function. The function is passed the notice as a ``Notice::Info`` record and
it returns a boolean value indicating if the entry is applicable to that
particular notice.
The :bro:see:`Notice::PolicyItem` record type has a field name ``$pred``
which defines the entry's condition in the form of a predicate written
as a Bro function. The function is passed the notice as a
:bro:see:`Notice::Info` record and it returns a boolean value indicating
if the entry is applicable to that particular notice.
.. note::
@ -125,14 +128,14 @@ particular notice.
(``T``) since an implicit false (``F``) value would never be used.
Bro evaluates the predicates of each entry in the order defined by the
``$priority`` field in ``Notice::PolicyItem`` records. The valid values are
0-10 with 10 being earliest evaluated. If ``$priority`` is omitted, the
default priority is 5.
``$priority`` field in :bro:see:`Notice::PolicyItem` records. The valid
values are 0-10 with 10 being earliest evaluated. If ``$priority`` is
omitted, the default priority is 5.
Behavior Modification Fields
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
There are a set of fields in the ``Notice::PolicyItem`` record type that
There are a set of fields in the :bro:see:`Notice::PolicyItem` record type that
indicate ways that either the notice or notice processing should be modified
if the predicate field (``$pred``) evaluated to true (``T``). Those fields are
explained in more detail in the following table.
@ -146,8 +149,8 @@ explained in more detail in the following table.
- Example
* - ``$action=<Notice::Action>``
- Each Notice::PolicyItem can have a single action applied to the notice
with this field.
- Each :bro:see:`Notice::PolicyItem` can have a single action
applied to the notice with this field.
- ``$action = Notice::ACTION_EMAIL``
* - ``$suppress_for=<interval>``
@ -162,9 +165,9 @@ explained in more detail in the following table.
- This field can be used for modification of the notice policy
evaluation. To stop processing of notice policy items before
evaluating all of them, set this field to ``T`` and make the ``$pred``
field return ``T``. ``Notice::PolicyItem`` records defined at a higher
priority as defined by the ``$priority`` field will still be evaluated
but those at a lower priority won't.
field return ``T``. :bro:see:`Notice::PolicyItem` records defined at
a higher priority as defined by the ``$priority`` field will still be
evaluated but those at a lower priority won't.
- ``$halt = T``
@ -186,11 +189,11 @@ Notice Policy Shortcuts
Although the notice framework provides a great deal of flexibility and
configurability there are many times that the full expressiveness isn't needed
and actually becomes a hindrance to achieving results. The framework provides
a default ``Notice::policy`` suite as a way of giving users the
a default :bro:see:`Notice::policy` suite as a way of giving users the
shortcuts to easily apply many common actions to notices.
These are implemented as sets and tables indexed with a
``Notice::Type`` enum value. The following table shows and describes
:bro:see:`Notice::Type` enum value. The following table shows and describes
all of the variables available for shortcut configuration of the notice
framework.
@ -201,40 +204,44 @@ framework.
* - Variable name
- Description
* - Notice::ignored_types
- Adding a ``Notice::Type`` to this set results in the notice
* - :bro:see:`Notice::ignored_types`
- Adding a :bro:see:`Notice::Type` to this set results in the notice
being ignored. It won't have any other action applied to it, not even
``Notice::ACTION_LOG``.
:bro:see:`Notice::ACTION_LOG`.
* - Notice::emailed_types
- Adding a ``Notice::Type`` to this set results in
``Notice::ACTION_EMAIL`` being applied to the notices of that type.
* - :bro:see:`Notice::emailed_types`
- Adding a :bro:see:`Notice::Type` to this set results in
:bro:see:`Notice::ACTION_EMAIL` being applied to the notices of
that type.
* - Notice::alarmed_types
- Adding a Notice::Type to this set results in
``Notice::ACTION_ALARM`` being applied to the notices of that type.
* - :bro:see:`Notice::alarmed_types`
- Adding a :bro:see:`Notice::Type` to this set results in
:bro:see:`Notice::ACTION_ALARM` being applied to the notices of
that type.
* - Notice::not_suppressed_types
- Adding a ``Notice::Type`` to this set results in that notice no longer
undergoing the normal notice suppression that would take place. Be
careful when using this in production it could result in a dramatic
increase in the number of notices being processed.
* - :bro:see:`Notice::not_suppressed_types`
- Adding a :bro:see:`Notice::Type` to this set results in that notice
no longer undergoing the normal notice suppression that would
take place. Be careful when using this in production; it could
result in a dramatic increase in the number of notices being
processed.
* - Notice::type_suppression_intervals
- This is a table indexed on ``Notice::Type`` and yielding an interval.
It can be used as an easy way to extend the default suppression
interval for an entire ``Notice::Type`` without having to create a
whole ``Notice::policy`` entry and setting the ``$suppress_for``
field.
* - :bro:see:`Notice::type_suppression_intervals`
- This is a table indexed on :bro:see:`Notice::Type` and yielding an
interval. It can be used as an easy way to extend the default
suppression interval for an entire :bro:see:`Notice::Type`
without having to create a whole :bro:see:`Notice::policy` entry
and setting the ``$suppress_for`` field.
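As a quick illustration of these shortcuts, the following sketch (using the
``SSH::Login`` notice type mentioned elsewhere in this document as an example)
emails every SSH login notice and extends its suppression interval:

.. code:: bro

    redef Notice::emailed_types += {
        SSH::Login,
    };

    redef Notice::type_suppression_intervals += {
        [SSH::Login] = 1 day,
    };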
Raising Notices
---------------
A script should raise a notice for any occurrence that a user may want to be
notified about or take action on. For example, whenever the base SSH analysis
scripts sees an SSH session where it is heuristically guessed to be a
successful login, it raises a Notice of the type ``SSH::Login``. The code in
the base SSH analysis script looks like this:
A script should raise a notice for any occurrence that a user may want
to be notified about or take action on. For example, whenever the base
SSH analysis scripts sees an SSH session where it is heuristically
guessed to be a successful login, it raises a Notice of the type
:bro:see:`SSH::Login`. The code in the base SSH analysis script looks
like this:
.. code:: bro
@ -242,10 +249,10 @@ the base SSH analysis script looks like this:
$msg="Heuristically detected successful SSH login.",
$conn=c]);
``NOTICE`` is a normal function in the global namespace which wraps a function
within the ``Notice`` namespace. It takes a single argument of the
``Notice::Info`` record type. The most common fields used when raising notices
are described in the following table:
:bro:see:`NOTICE` is a normal function in the global namespace which
wraps a function within the ``Notice`` namespace. It takes a single
argument of the :bro:see:`Notice::Info` record type. The most common
fields used when raising notices are described in the following table:
.. list-table::
:widths: 32 40
@ -263,21 +270,21 @@ are described in the following table:
information about this particular instance of the notice type.
* - ``$sub``
- This is a sub-message which meant for human readability but will
- This is a sub-message meant for human readability but will
frequently also be used to contain data meant to be matched with the
``Notice::policy``.
* - ``$conn``
- If a connection record is available when the notice is being raised
and the notice represents some attribute of the connection the
and the notice represents some attribute of the connection, then the
connection record can be given here. Other fields such as ``$id`` and
``$src`` will automatically be populated from this value.
* - ``$id``
- If a conn_id record is available when the notice is being raised and
the notice represents some attribute of the connection, the connection
be given here. Other fields such as ``$src`` will automatically be
populated from this value.
the notice represents some attribute of the connection, then the
connection can be given here. Other fields such as ``$src`` will
automatically be populated from this value.
* - ``$src``
- If the notice represents an attribute of a single host then it's
@ -295,9 +302,10 @@ are described in the following table:
* - ``$suppress_for``
- This field can be set if there is a natural suppression interval for
the notice that may be different than the default value. The value set
to this field can also be modified by a user's ``Notice::policy`` so
the value is not set permanently and unchangeably.
the notice that may be different than the default value. The
value set to this field can also be modified by a user's
:bro:see:`Notice::policy` so the value is not set permanently
and unchangeably.
When writing Bro scripts which raise notices, some thought should be given to
what the notice represents and what data should be provided to give a consumer
@ -305,7 +313,7 @@ of the notice the best information about the notice. If the notice is
representative of many connections and is an attribute of a host (e.g. a
scanning host) it probably makes most sense to fill out the ``$src`` field and
not give a connection or conn_id. If a notice is representative of a
connection attribute (e.g. an apparent SSH login) the it makes sense to fill
connection attribute (e.g. an apparent SSH login) then it makes sense to fill
out either ``$conn`` or ``$id`` based on the data that is available when the
notice is raised. Using care when inserting data into a notice will make later
analysis easier when only the data to fully represent the occurrence that
@ -325,7 +333,7 @@ The notice framework supports suppression for notices if the author of the
script that is generating the notice has indicated to the notice framework how
to identify notices that are intrinsically the same. Identification of these
"intrinsically duplicate" notices is implemented with an optional field in
``Notice::Info`` records named ``$identifier`` which is a simple string.
:bro:see:`Notice::Info` records named ``$identifier`` which is a simple string.
If the ``$identifier`` and ``$type`` fields are the same for two notices, the
notice framework actually considers them to be the same thing and can use that
information to suppress duplicates for a configurable period of time.
@ -337,12 +345,13 @@ information to suppress duplicates for a configurable period of time.
could be completely legitimate usage if no notices could ever be
considered to be duplicates.
The ``$identifier`` field is typically comprised of several pieces of data
related to the notice that when combined represent a unique instance of that
notice. Here is an example of the script
``policy/protocols/ssl/validate-certs.bro`` raising a notice for session
negotiations where the certificate or certificate chain did not validate
successfully against the available certificate authority certificates.
The ``$identifier`` field is typically comprised of several pieces of
data related to the notice that when combined represent a unique
instance of that notice. Here is an example of the script
:doc:`scripts/policy/protocols/ssl/validate-certs` raising a notice
for session negotiations where the certificate or certificate chain did
not validate successfully against the available certificate authority
certificates.
.. code:: bro
@ -369,7 +378,7 @@ it's assumed that the script author who is raising the notice understands the
full problem set and edge cases of the notice which may not be readily
apparent to users. If users don't want the suppression to take place or simply
want a different interval, they can always modify it with the
``Notice::policy``.
:bro:see:`Notice::policy`.
Extending Notice Framework


@ -1,14 +1,16 @@
.. _CMake: http://www.cmake.org
.. _SWIG: http://www.swig.org
.. _Xcode: https://developer.apple.com/xcode/
.. _MacPorts: http://www.macports.org
.. _Fink: http://www.finkproject.org
.. _Homebrew: http://mxcl.github.com/homebrew
.. _bro downloads page: http://bro-ids.org/download/index.html
=================
Quick Start Guide
=================
.. class:: opening
.. rst-class:: opening
The short story for getting Bro up and running in a simple configuration
for analysis of either live traffic from a network interface or a packet
@ -26,24 +28,23 @@ source code forms.
Pre-Built Binary Release Packages
---------------------------------
See the `downloads page <{{docroot}}/download/index.html>`_ for currently
supported/targeted platforms.
See the `bro downloads page`_ for currently supported/targeted platforms.
* RPM
.. console::
.. console::
> sudo yum localinstall Bro-all*.rpm
sudo yum localinstall Bro-*.rpm
* DEB
.. console::
.. console::
> sudo gdebi Bro-all-*.deb
sudo gdebi Bro-*.deb
* MacOS Disk Image with Installer
Just open the ``Bro-all-*.dmg`` and then run the ``.pkg`` installer.
Just open the ``Bro-*.dmg`` and then run the ``.pkg`` installer.
Everything installed by the package will go into ``/opt/bro``.
The primary install prefix for binary packages is ``/opt/bro``.
@ -56,89 +57,103 @@ Building From Source
Required Dependencies
~~~~~~~~~~~~~~~~~~~~~
The following dependencies are required to build Bro:
* RPM/RedHat-based Linux:
.. console::
.. console::
> sudo yum install cmake make gcc gcc-c++ flex bison libpcap-devel openssl-devel python-devel swig
sudo yum install cmake make gcc gcc-c++ flex bison libpcap-devel openssl-devel python-devel swig zlib-devel file-devel
* DEB/Debian-based Linux:
.. console::
.. console::
> sudo apt-get install cmake make gcc g++ flex bison libpcap-dev libssl-dev python-dev swig
sudo apt-get install cmake make gcc g++ flex bison libpcap-dev libssl-dev python-dev swig zlib1g-dev libmagic-dev
* FreeBSD
Most required dependencies should come with a minimal FreeBSD install
except for the following.
.. console::
.. console::
> sudo pkg_add -r cmake swig bison python
sudo pkg_add -r bash cmake swig bison python
Note that ``bash`` needs to be in ``PATH``, which by default it is
not. The FreeBSD package installs the binary into
``/usr/local/bin``.
* Mac OS X
Snow Leopard (10.6) comes with all required dependencies except for CMake_.
Compiling source code on Macs requires first downloading Xcode_,
then going through its "Preferences..." -> "Downloads" menus to
install the "Command Line Tools" component.
Lion (10.7) comes with all required dependencies except for CMake_ and SWIG_.
Lion (10.7) and Mountain Lion (10.8) come with all required
dependencies except for CMake_, SWIG_, and ``libmagic``.
Distributions of these dependencies can be obtained from the project websites
linked above, but they're also likely available from your preferred Mac OS X
package management system (e.g. MacPorts_, Fink_, or Homebrew_).
Distributions of these dependencies can be obtained from the project
websites linked above, but they're also likely available from your
preferred Mac OS X package management system (e.g. MacPorts_, Fink_,
or Homebrew_).
Note that the MacPorts ``swig`` package may not include any specific
language support so you may need to also install ``swig-ruby`` and
``swig-python``.
Specifically for MacPorts, the ``swig``, ``swig-ruby``, ``swig-python``
and ``file`` packages provide the required dependencies.
Optional Dependencies
~~~~~~~~~~~~~~~~~~~~~
Bro can use libmagic for identifying file types, libGeoIP for geo-locating
IP addresses, libz for (de)compression during analysis and communication,
and sendmail for sending emails.
Bro can use libGeoIP for geo-locating IP addresses, and sendmail for
sending emails.
* RPM/RedHat-based Linux:
* RedHat Enterprise Linux:
.. console::
.. console::
> sudo yum install zlib-devel file-devel GeoIP-devel sendmail
sudo yum install geoip-devel sendmail
* CentOS Linux:
.. console::
sudo yum install GeoIP-devel sendmail
* DEB/Debian-based Linux:
.. console::
.. console::
> sudo apt-get install zlib1g-dev libmagic-dev libgeoip-dev sendmail
sudo apt-get install libgeoip-dev sendmail
* Ports-based FreeBSD
.. console::
.. console::
> sudo pkg_add -r GeoIP
sudo pkg_add -r GeoIP
libz, libmagic, and sendmail are typically already available.
sendmail is typically already available.
* Mac OS X
Vanilla OS X installations don't ship with libmagic or libGeoIP, but
if installed from your preferred package management system (e.g. MacPorts,
Fink Homebrew), they should be automatically detected and Bro will compile
Fink, or Homebrew), they should be automatically detected and Bro will compile
against them.
Additional steps may be needed to `get the right GeoIP database
<{{git('bro:doc/geoip.rst')}}>`_.
Additional steps may be needed to :doc:`get the right GeoIP database <geoip>`
Compiling Bro Source Code
~~~~~~~~~~~~~~~~~~~~~~~~~
Bro releases are bundled into source packages for convenience and
available from the `downloads page <{{docroot}}/download/index.html>`_.
available from the `bro downloads page`_.
The latest Bro development versions are obtainable through git repositories
hosted at `git.bro-ids.org <http://git.bro-ids.org>`_. See our `git development
documentation <{{docroot}}/development/process.html>`_ for comprehensive
information on Bro's use of git revision control, but the short story for
downloading the full source code experience for Bro via git is:
The latest Bro development versions are obtainable through git
repositories hosted at `git.bro-ids.org <http://git.bro-ids.org>`_. See
our `git development documentation
<http://bro-ids.org/development/process.html>`_ for comprehensive
information on Bro's use of git revision control, but the short story
for downloading the full source code experience for Bro via git is:
.. console::
@ -146,8 +161,8 @@ downloading the full source code experience for Bro via git is:
.. note:: If you choose to clone the ``bro`` repository non-recursively for
a "minimal Bro experience", be aware that compiling it depends on
BinPAC, which has it's own ``binpac`` repository. Either install it
first or initizalize/update the cloned ``bro`` repository's
BinPAC, which has its own ``binpac`` repository. Either install it
first or initialize/update the cloned ``bro`` repository's
``aux/binpac`` submodule.
See the ``INSTALL`` file included with the source code for more information
@ -157,9 +172,9 @@ desired root install path):
.. console::
> ./configure --prefix=/desired/install/path
> make
> make install
./configure --prefix=/desired/install/path
make
make install
The default installation prefix is ``/usr/local/bro``, which would typically
require root privileges when doing the ``make install``.
@ -174,13 +189,13 @@ Bourne-Shell Syntax:
.. console::
> export PATH=/usr/local/bro/bin:$PATH
export PATH=/usr/local/bro/bin:$PATH
C-Shell Syntax:
.. console::
> setenv PATH /usr/local/bro/bin:$PATH
setenv PATH /usr/local/bro/bin:$PATH
Or substitute ``/opt/bro/bin`` instead if you installed from a binary package.
@ -191,7 +206,7 @@ BroControl is an interactive shell for easily operating/managing Bro
installations on a single system or even across multiple systems in a
traffic-monitoring cluster.
.. note:: Below, ``$PREFIX``, is used to reference the Bro installation
.. note:: Below, ``$PREFIX`` is used to reference the Bro installation
root directory.
A Minimal Starting Configuration
@ -211,7 +226,7 @@ Now start the BroControl shell like:
.. console::
> broctl
broctl
Since this is the first-time use of the shell, perform an initial installation
of the BroControl configuration:
@ -233,10 +248,9 @@ policy and output the results in ``$PREFIX/logs``.
.. note:: The user starting BroControl needs permission to capture
network traffic. If you are not root, you may need to grant further
privileges to the account you're using; see the `FAQ
<{{docroot}}/documentation/faq.html>`_. Also, if it
looks like Bro is not seeing any traffic, check out the FAQ entry
checksum offloading.
privileges to the account you're using; see the :doc:`FAQ <faq>`.
Also, if it looks like Bro is not seeing any traffic, check out
the FAQ entry on checksum offloading.
You can leave it running for now, but to stop this Bro instance you would do:
@ -244,9 +258,7 @@ You can leave it running for now, but to stop this Bro instance you would do:
[BroControl] > stop
We also recommend to insert the following entry into `crontab`:
.. console::
We also recommend to insert the following entry into `crontab`::
0-59/5 * * * * $PREFIX/bin/broctl cron
@ -373,9 +385,7 @@ the variable's value may not change at run-time, but whose initial value can be
modified via the ``redef`` operator at parse-time.
So let's continue on our path to modify the behavior for the two SSL
and SSH notices. Looking at
`$PREFIX/share/bro/base/frameworks/notice/main.bro
<{{autodoc_bro_scripts}}/scripts/base/frameworks/notice/main.html>`_,
and SSH notices. Looking at :doc:`scripts/base/frameworks/notice/main`,
we see that it advertises:
.. code:: bro
@ -449,7 +459,7 @@ that only takes the email action for SSH logins to a defined set of servers:
]
};
You'll just have to trust the syntax for now, but what we've done is first
You'll just have to trust the syntax for now, but what we've done is
first declare our own variable to hold a set of watched addresses,
``watched_servers``; then added a record to the policy that will generate
an email on the condition that the predicate function evaluates to true, which
@ -477,8 +487,7 @@ tweak the most basic options. Here's some suggestions on what to explore next:
* Reading the code of scripts that ship with Bro is also a great way to gain
understanding of the language and how you can start writing your own custom
analysis.
* Review the `FAQ <{{docroot}}/documentation/faq.html>`_.
* Check out more `documentation <{{docroot}}/documentation/index.html>`_.
* Review the :doc:`FAQ <faq>`.
* Continue reading below for another mini-tutorial on using Bro as a standalone
command-line utility.
@ -496,7 +505,7 @@ Analyzing live traffic from an interface is simple:
.. console::
> bro -i en0 <list of scripts to load>
bro -i en0 <list of scripts to load>
``en0`` can be replaced by the interface of your choice and for the list of
scripts, you can just use "all" for now to perform all the default analysis
@ -504,7 +513,7 @@ that's available.
Bro will output log files into the working directory.
.. note:: The `FAQ <{{docroot}}/documentation/faq.html>`_ entries about
.. note:: The :doc:`FAQ <faq>` entries about
capturing as an unprivileged user and checksum offloading are particularly
relevant at this point.
@ -513,7 +522,7 @@ command-line:
.. console::
> bro -i en0 local
bro -i en0 local
This will cause Bro to print a warning about lacking the
``Site::local_nets`` variable being configured. You can supply this
@ -522,7 +531,7 @@ in place of the example subnets):
.. console::
> bro -r mypackets.trace local "Site::local_nets += { 1.2.3.0/24, 5.6.7.0/24 }"
bro -r mypackets.trace local "Site::local_nets += { 1.2.3.0/24, 5.6.7.0/24 }"
Reading Packet Capture (pcap) Files
@ -533,7 +542,7 @@ like this:
.. console::
> sudo tcpdump -i en0 -s 0 -w mypackets.trace
sudo tcpdump -i en0 -s 0 -w mypackets.trace
Where ``en0`` can be replaced by the correct interface for your system as
shown by e.g. ``ifconfig``. (The ``-s 0`` argument tells it to capture
@ -544,7 +553,7 @@ and tell Bro to perform all the default analysis on the capture which primarily
.. console::
> bro -r mypackets.trace
bro -r mypackets.trace
Bro will output log files into the working directory.
@ -553,7 +562,7 @@ script that we include as a suggested configuration:
.. console::
> bro -r mypackets.trace local
bro -r mypackets.trace local
Telling Bro Which Scripts to Load
@ -563,7 +572,7 @@ A command-line invocation of Bro typically looks like:
.. console::
> bro <options> <policies...>
bro <options> <policies...>
Where the last arguments are the specific policy scripts that this Bro
instance will load. These arguments don't have to include the ``.bro``
@ -578,7 +587,7 @@ logging) and adds SSL certificate validation.
.. console::
> bro -r mypackets.trace protocols/ssl/validate-certs
bro -r mypackets.trace protocols/ssl/validate-certs
You might notice that a script you load from the command line uses the
``@load`` directive in the Bro language to declare dependence on other scripts.

doc/reporting-problems.rst Normal file

@ -0,0 +1,194 @@
Reporting Problems
==================
.. rst-class:: opening
Here we summarize some steps to follow when you see Bro doing
something it shouldn't. To provide help, it is often crucial for
us to have a way of reliably reproducing the effect you're seeing.
Unfortunately, reproducing problems can be rather tricky with Bro
because more often than not, they occur either only in very rare
situations or only after Bro has been running for some time. In
particular, getting a small trace showing a specific effect can be
a real problem. In the following, we'll summarize some strategies
to this end.
Reporting Problems
------------------
Generally, when you encounter a problem with Bro, the best thing to do
is to open a new ticket in `Bro's issue tracker
<http://tracker.bro-ids.org/>`__ and include information on how to
reproduce the issue. Ideally, your ticket should come with the
following:
* The Bro version you're using (if working directly from the git
repository, the branch and revision number.)
* The output you're seeing along with a description of what you'd expect
Bro to do instead.
* A *small* trace in `libpcap format <http://www.tcpdump.org>`__
demonstrating the effect (assuming the problem doesn't happen right
at startup already).
* The exact command-line you're using to run Bro with that trace. If
you can, please try to run the Bro binary directly from the command
line rather than using BroControl.
* Any non-standard scripts you're using (but please only those really
necessary; just a small code snippet triggering the problem would
be perfect).
* If you encounter a crash, information from the core dump, such as
the stack backtrace, can be very helpful. See below for more on
this.
How Do I Get a Trace File?
--------------------------
As Bro is usually running live, coming up with a small trace file that
reproduces a problem can turn out to be quite a challenge. Often it
works best to start with a large trace that triggers the problem,
and then successively thin it out as much as possible.
To get to the initial large trace, here are a few things you can try:
* Capture a trace with `tcpdump <http://www.tcpdump.org/>`__, either
on the same interface Bro is running on, or on another host where
you can generate traffic of the kind likely triggering the problem
(e.g., if you're seeing problems with the HTTP analyzer, record some
of your Web browsing on your desktop). When using tcpdump, don't
forget to record *complete* packets (``tcpdump -s 0 ...``). You can
reduce the amount of traffic captured by using a suitable BPF filter
(e.g., for HTTP only, try ``port 80``); see the sketch after this
list.
* Bro's command-line option ``-w <trace>`` records all packets it
processes into the given file. You can then later run Bro
offline on this trace and it will process the packets in the same
way as it did live. This is particularly helpful with problems that
only occur after Bro has already been running for some time. For
example, sometimes a crash may be triggered by a particular kind of
traffic only occurring rarely. Running Bro live with ``-w`` and
then, after the crash, offline on the recorded trace might, with a
little bit of luck, reproduce the problem reliably. However, be
careful with ``-w``: it can result in huge trace files, quickly
filling up your disk. (One way to mitigate the space issues is to
periodically delete the trace file by configuring
``rotate-logs.bro`` accordingly. BroControl does that for you if you
set its ``SaveTraces`` option.)
* Finally, you can try running Bro on a publicly available trace
file, such as `anonymized FTP traffic <http://www-nrg.ee.lbl.gov
/anonymized-traces.html>`__, `headers-only enterprise traffic
<http://www.icir.org/enterprise-tracing/Overview.html>`__, or
`Defcon traffic <http://cctf.shmoo.com/>`__. Some of these
particularly stress certain components of Bro (e.g., the Defcon
traces contain tons of scans).
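To illustrate the first of these points, a capture along the following lines
records complete packets of HTTP traffic only (the interface name and output
file are placeholders to adapt to your environment):

.. console::

    > tcpdump -i en0 -s 0 -w http.trace port 80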
Once you have a trace that demonstrates the effect, you will often
notice that it's pretty big, in particular if recorded from the link
you're monitoring. Therefore, the next step is to shrink its size as
much as possible. Here are a few things you can try to this end:
* Very often, a single connection is able to demonstrate the problem.
If you can identify which one it is (e.g., from one of Bro's
``*.log`` files) you can extract the connection's packets from the
trace using tcpdump by filtering for the corresponding 4-tuple of
addresses and ports:
.. console::
> tcpdump -r large.trace -w small.trace host <ip1> and port <port1> and host <ip2> and port <port2>
* If you can't reduce the problem to a connection, try to identify
either a host pair or a single host triggering it, and filter down
the trace accordingly.
* You can try to extract a smaller time slice from the trace using
`TCPslice <http://www.tcpdump.org/related.html>`__. For example, to
extract the first 100 seconds from the trace:
.. console::
> tcpslice +100 <in >out
Alternatively, tcpdump extracts the first ``n`` packets with its
option ``-c <n>``.
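For instance, to keep just the first 1000 packets of a hypothetical
``large.trace`` (``-r``/``-w`` read and write trace files, ``-c`` limits the
packet count):

.. console::

    > tcpdump -r large.trace -w first1000.trace -c 1000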
Getting More Information After a Crash
--------------------------------------
If Bro crashes, a *core dump* can be very helpful to nail down the
problem. Examining a core is not for the faint of heart but can reveal
extremely useful information.
First, you should configure Bro with the option ``--enable-debug`` and
recompile; this will disable all compiler optimizations and thus make
the core dump more useful (don't expect great performance with this
version though; compiling Bro without optimization has a noticeable
impact on its CPU usage). Then enable core dumps if you haven't
already (e.g., ``ulimit -c unlimited`` if you're using bash).
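In terms of concrete commands, the preparation might look like this (assuming
a source build and a bash shell):

.. console::

    > ./configure --enable-debug
    > make
    > ulimit -c unlimited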
Once Bro has crashed, start gdb with the Bro binary and the file
containing the core dump. (Alternatively, you can also run Bro
directly inside gdb instead of working from a core file.) The first
helpful information to include with your tracker ticket is a stack
backtrace, which you get with gdb's ``bt`` command:
.. console::
> gdb bro core
[...]
> bt
If the crash occurs inside Bro's script interpreter, the next thing to
do is to identify the line of script code processed just before the
abnormal termination. Look for methods in the stack backtrace which
belong to any of the script interpreter's classes. Roughly speaking,
these are all classes with names ending in ``Expr``, ``Stmt``, or
``Val``. Then climb up the stack with ``up`` until you reach the first
of these methods. The object to which ``this`` is pointing will have a
``Location`` object, which in turn contains the file name and line
number of the corresponding piece of script code. Continuing the
example from above, here's how to get that information:
.. console::
[in gdb]
> up
> ...
> up
> print this->location->filename
> print this->location->first_line
If the crash occurs while processing input packets but you cannot
directly tell which connection is responsible (and thus not extract
its packets from the trace as suggested above), try getting the
4-tuple of the connection currently being processed from the core dump
by again examining the stack backtrace, this time looking for methods
belonging to the ``Connection`` class. That class has members
``orig_addr``/``resp_addr`` and ``orig_port``/``resp_port`` storing
(pointers to) the IP addresses and ports respectively:
.. console::
[in gdb]
> up
> ...
> up
> printf "%08x:%04x %08x:%04x\n", *this->orig_addr, this->orig_port, *this->resp_addr, this->resp_port
Note that these values are stored in `network byte order
<http://en.wikipedia.org/wiki/Endianness#Endianness_in_networking>`__
so you will need to flip the bytes around if you are on a
little-endian machine (which is why the above example prints them in
hex). For example, if an IP address prints as ``0100007f``, that's
127.0.0.1.

View file

@ -1,16 +1,3 @@
set(BIF_SRC_DIR ${PROJECT_SOURCE_DIR}/src)
set(RST_OUTPUT_DIR ${CMAKE_CURRENT_BINARY_DIR}/rest_output)
set(DOC_OUTPUT_DIR ${CMAKE_CURRENT_BINARY_DIR}/out)
set(DOC_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/source)
set(DOC_SOURCE_WORKDIR ${CMAKE_CURRENT_BINARY_DIR}/source)
file(GLOB_RECURSE DOC_SOURCES FOLLOW_SYMLINKS "*")
# configure the Sphinx config file (expand variables CMake might know about)
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/conf.py.in
${CMAKE_CURRENT_BINARY_DIR}/conf.py
@ONLY)
# find out what BROPATH to use when executing bro
execute_process(COMMAND ${CMAKE_BINARY_DIR}/bro-path-dev
OUTPUT_VARIABLE BROPATH
@ -48,7 +35,7 @@ endif ()
# which summary text can be extracted at build time
# ${group}_doc_names: a running list of reST style document names that can be
# given to a :doc: role, shared indices with ${group}_files
#
macro(REST_TARGET srcDir broInput)
set(absSrcPath ${srcDir}/${broInput})
get_filename_component(basename ${broInput} NAME)
@ -86,12 +73,14 @@ macro(REST_TARGET srcDir broInput)
elseif (${extension} STREQUAL ".bif.bro")
set(group bifs)
elseif (relDstDir)
set(pkgIndex scripts/${relDstDir}/index)
set(group ${pkgIndex})
set(group ${relDstDir}/index)
# add package index to master package list if not already in it
list(FIND MASTER_PKG_LIST ${pkgIndex} _found)
# and if a __load__.bro exists in the original script directory
list(FIND MASTER_PKG_LIST ${relDstDir} _found)
if (_found EQUAL -1)
list(APPEND MASTER_PKG_LIST ${pkgIndex})
if (EXISTS ${CMAKE_SOURCE_DIR}/scripts/${relDstDir}/__load__.bro)
list(APPEND MASTER_PKG_LIST ${relDstDir})
endif ()
endif ()
else ()
set(group "")
@ -134,6 +123,7 @@ macro(REST_TARGET srcDir broInput)
ARGS -rf .state *.log *.rst
DEPENDS bro
DEPENDS ${absSrcPath}
WORKING_DIRECTORY ${CMAKE_BINARY_DIR}
COMMENT "[Bro] Generating reST docs for ${broInput}"
)
@ -143,19 +133,21 @@ endmacro(REST_TARGET)
include(DocSourcesList.cmake)
# create temporary list of all docs to include in the master policy/index file
set(MASTER_POLICY_INDEX ${CMAKE_CURRENT_BINARY_DIR}/policy_index)
file(WRITE ${MASTER_POLICY_INDEX} "${MASTER_POLICY_INDEX_TEXT}")
# create the temporary list of all packages to include in the master
# policy/packages.rst file
set(MASTER_PACKAGE_INDEX ${CMAKE_CURRENT_BINARY_DIR}/pkg_index)
set(MASTER_PKG_INDEX_TEXT "")
foreach (pkg ${MASTER_PKG_LIST})
# strip of the trailing /index for the link name
get_filename_component(lnktxt ${pkg} PATH)
# pretty-up the link name by removing common scripts/ prefix
string(REPLACE "scripts/" "" lnktxt "${lnktxt}")
set(MASTER_PKG_INDEX_TEXT "${MASTER_PKG_INDEX_TEXT}\n ${lnktxt} <${pkg}>")
set(MASTER_PKG_INDEX_TEXT
"${MASTER_PKG_INDEX_TEXT}\n:doc:`${pkg} <${pkg}/index>`\n")
if (EXISTS ${CMAKE_SOURCE_DIR}/scripts/${pkg}/README)
file(STRINGS ${CMAKE_SOURCE_DIR}/scripts/${pkg}/README pkgreadme)
foreach (line ${pkgreadme})
set(MASTER_PKG_INDEX_TEXT "${MASTER_PKG_INDEX_TEXT}\n ${line}")
endforeach ()
set(MASTER_PKG_INDEX_TEXT "${MASTER_PKG_INDEX_TEXT}\n")
endif ()
endforeach ()
file(WRITE ${MASTER_PACKAGE_INDEX} "${MASTER_PKG_INDEX_TEXT}")
@ -186,11 +178,11 @@ if (EXISTS ${RST_OUTPUT_DIR})
list(FIND ALL_REST_OUTPUTS ${_doc} _found)
if (_found EQUAL -1)
file(REMOVE ${_doc})
message(STATUS "AutoDoc: remove stale reST doc: ${_doc}")
message(STATUS "Broxygen: remove stale reST doc: ${_doc}")
string(REPLACE .rst .bro _brofile ${_doc})
if (EXISTS ${_brofile})
file(REMOVE ${_brofile})
message(STATUS "AutoDoc: remove stale bro source: ${_brofile}")
message(STATUS "Broxygen: remove stale bro source: ${_brofile}")
endif ()
endif ()
endforeach ()
@ -211,57 +203,3 @@ add_custom_target(restclean
COMMAND "${CMAKE_COMMAND}" -E remove_directory
${RST_OUTPUT_DIR}
VERBATIM)
# The "sphinxdoc" target generates reST documentation for any outdated bro
# scripts and then uses Sphinx to generate HTML documentation from the reST
add_custom_target(sphinxdoc
# copy the template documentation to the build directory
# to give as input for sphinx
COMMAND "${CMAKE_COMMAND}" -E copy_directory
${DOC_SOURCE_DIR}
${DOC_SOURCE_WORKDIR}
# copy generated policy script documentation into the
# working copy of the template documentation
COMMAND "${CMAKE_COMMAND}" -E copy_directory
${RST_OUTPUT_DIR}
${DOC_SOURCE_WORKDIR}/scripts
# append to the master index of all policy scripts
COMMAND cat ${MASTER_POLICY_INDEX} >>
${DOC_SOURCE_WORKDIR}/scripts/index.rst
# append to the master index of all policy packages
COMMAND cat ${MASTER_PACKAGE_INDEX} >>
${DOC_SOURCE_WORKDIR}/packages.rst
# construct a reST file for each group
COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/group_index_generator.py
${CMAKE_CURRENT_BINARY_DIR}/group_list
${CMAKE_CURRENT_BINARY_DIR}
${DOC_SOURCE_WORKDIR}
# tell sphinx to generate html
COMMAND sphinx-build
-b html
-c ${CMAKE_CURRENT_BINARY_DIR}
-d ${DOC_OUTPUT_DIR}/doctrees
${DOC_SOURCE_WORKDIR}
${DOC_OUTPUT_DIR}/html
# create symlink to the html output directory for convenience
COMMAND "${CMAKE_COMMAND}" -E create_symlink
${DOC_OUTPUT_DIR}/html
${CMAKE_BINARY_DIR}/html
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
COMMENT "[Sphinx] Generating HTML policy script docs"
# SOURCES just adds stuff to IDE projects as a convenience
SOURCES ${DOC_SOURCES})
# The "sphinxclean" target removes just the Sphinx input/output directories
# from the build directory.
add_custom_target(sphinxclean
COMMAND "${CMAKE_COMMAND}" -E remove_directory
${DOC_SOURCE_WORKDIR}
COMMAND "${CMAKE_COMMAND}" -E remove_directory
${DOC_OUTPUT_DIR}
VERBATIM)
add_dependencies(sphinxdoc sphinxclean restdoc)
add_dependencies(doc sphinxdoc)
add_dependencies(docclean sphinxclean restclean)

View file

@ -19,6 +19,7 @@ rest_target(${psd} base/init-bare.bro internal)
rest_target(${CMAKE_BINARY_DIR}/src base/bro.bif.bro)
rest_target(${CMAKE_BINARY_DIR}/src base/const.bif.bro)
rest_target(${CMAKE_BINARY_DIR}/src base/event.bif.bro)
rest_target(${CMAKE_BINARY_DIR}/src base/input.bif.bro)
rest_target(${CMAKE_BINARY_DIR}/src base/logging.bif.bro)
rest_target(${CMAKE_BINARY_DIR}/src base/reporter.bif.bro)
rest_target(${CMAKE_BINARY_DIR}/src base/strings.bif.bro)
@ -31,10 +32,18 @@ rest_target(${psd} base/frameworks/cluster/setup-connections.bro)
rest_target(${psd} base/frameworks/communication/main.bro)
rest_target(${psd} base/frameworks/control/main.bro)
rest_target(${psd} base/frameworks/dpd/main.bro)
rest_target(${psd} base/frameworks/input/main.bro)
rest_target(${psd} base/frameworks/input/readers/ascii.bro)
rest_target(${psd} base/frameworks/input/readers/benchmark.bro)
rest_target(${psd} base/frameworks/input/readers/raw.bro)
rest_target(${psd} base/frameworks/intel/main.bro)
rest_target(${psd} base/frameworks/logging/main.bro)
rest_target(${psd} base/frameworks/logging/postprocessors/scp.bro)
rest_target(${psd} base/frameworks/logging/postprocessors/sftp.bro)
rest_target(${psd} base/frameworks/logging/writers/ascii.bro)
rest_target(${psd} base/frameworks/logging/writers/dataseries.bro)
rest_target(${psd} base/frameworks/logging/writers/elasticsearch.bro)
rest_target(${psd} base/frameworks/logging/writers/none.bro)
rest_target(${psd} base/frameworks/metrics/cluster.bro)
rest_target(${psd} base/frameworks/metrics/main.bro)
rest_target(${psd} base/frameworks/metrics/non-cluster.bro)
@ -52,12 +61,15 @@ rest_target(${psd} base/frameworks/packet-filter/netstats.bro)
rest_target(${psd} base/frameworks/reporter/main.bro)
rest_target(${psd} base/frameworks/signatures/main.bro)
rest_target(${psd} base/frameworks/software/main.bro)
rest_target(${psd} base/frameworks/tunnels/main.bro)
rest_target(${psd} base/protocols/conn/contents.bro)
rest_target(${psd} base/protocols/conn/inactivity.bro)
rest_target(${psd} base/protocols/conn/main.bro)
rest_target(${psd} base/protocols/conn/polling.bro)
rest_target(${psd} base/protocols/dns/consts.bro)
rest_target(${psd} base/protocols/dns/main.bro)
rest_target(${psd} base/protocols/ftp/file-extract.bro)
rest_target(${psd} base/protocols/ftp/gridftp.bro)
rest_target(${psd} base/protocols/ftp/main.bro)
rest_target(${psd} base/protocols/ftp/utils-commands.bro)
rest_target(${psd} base/protocols/http/file-extract.bro)
@ -70,6 +82,8 @@ rest_target(${psd} base/protocols/irc/main.bro)
rest_target(${psd} base/protocols/smtp/entities-excerpt.bro)
rest_target(${psd} base/protocols/smtp/entities.bro)
rest_target(${psd} base/protocols/smtp/main.bro)
rest_target(${psd} base/protocols/socks/consts.bro)
rest_target(${psd} base/protocols/socks/main.bro)
rest_target(${psd} base/protocols/ssh/main.bro)
rest_target(${psd} base/protocols/ssl/consts.bro)
rest_target(${psd} base/protocols/ssl/main.bro)
@ -102,6 +116,7 @@ rest_target(${psd} policy/misc/analysis-groups.bro)
rest_target(${psd} policy/misc/capture-loss.bro)
rest_target(${psd} policy/misc/loaded-scripts.bro)
rest_target(${psd} policy/misc/profiling.bro)
rest_target(${psd} policy/misc/stats.bro)
rest_target(${psd} policy/misc/trim-trace-file.bro)
rest_target(${psd} policy/protocols/conn/known-hosts.bro)
rest_target(${psd} policy/protocols/conn/known-services.bro)
@ -133,6 +148,7 @@ rest_target(${psd} policy/protocols/ssl/known-certs.bro)
rest_target(${psd} policy/protocols/ssl/validate-certs.bro)
rest_target(${psd} policy/tuning/defaults/packet-fragments.bro)
rest_target(${psd} policy/tuning/defaults/warnings.bro)
rest_target(${psd} policy/tuning/logs-to-elasticsearch.bro)
rest_target(${psd} policy/tuning/track-all-assets.bro)
rest_target(${psd} site/local-manager.bro)
rest_target(${psd} site/local-proxy.bro)

View file

@ -1,6 +1,6 @@
This directory contains scripts and templates that can be used to automate
the generation of Bro script documentation. Several build targets are defined
by CMake:
by CMake and available in the top-level Makefile:
``restdoc``
@ -15,29 +15,10 @@ by CMake:
``build/`` directory inside ``reST`` (a symlink to
``doc/scripts/rest_output``).
``doc``
This target depends on a Python interpreter (>=2.5) and
`Sphinx <http://sphinx.pocoo.org/>`_ being installed. Sphinx can be
installed like::
> sudo easy_install sphinx
This target will first build ``restdoc`` target and then copy the
resulting reST files as an input directory to Sphinx.
After completion, HTML documentation can be located in the CMake
``build/`` directory inside ``html`` (a symlink to
``doc/scripts/out/html``)
``restclean``
This target removes any reST documentation that has been generated so far.
``docclean``
This target removes Sphinx inputs and outputs from the CMake ``build/`` dir.
The ``genDocSourcesList.sh`` script can be run to automatically generate
``DocSourcesList.cmake``, which is the file CMake uses to define the list
of documentation targets. This script should be run after adding new
@ -54,18 +35,10 @@ script's name to the blacklist, then append a ``rest_target()`` to the
``statictext`` variable where the first argument is the source directory
containing the policy script to document, the second argument is the file
name of the policy script, and the third argument is the path/name of a
pre-created reST document in the ``source/`` directory to which the
pre-created reST document in the ``../`` source directory to which the
``make doc`` process can append script documentation references. This
pre-created reST document should also then be linked to from the TOC tree
in ``source/index.rst``.
The Sphinx source tree template in ``source/`` can be modified to add more
common/general documentation, style sheets, JavaScript, etc. The Sphinx
config file is produced from ``conf.py.in``, so that can be edited to change
various Sphinx options, like setting the default HTML rendering theme.
There is also a custom Sphinx domain implemented in ``source/ext/bro.py``
which adds some reST directives and roles that aid in generating useful
index entries and cross-references.
in ``../index.rst``.
See ``example.bro`` for an example of how to document a Bro script such that
``make doc`` will be able to produce reST/HTML documentation for it.

5
doc/scripts/bifs.rst Normal file
View file

@ -0,0 +1,5 @@
.. This is a stub doc to which broxygen appends during the build process
Built-In Functions (BIFs)
=========================

638
doc/scripts/builtins.rst Normal file
View file

@ -0,0 +1,638 @@
Builtin Types and Attributes
============================
Types
-----
The Bro scripting language supports the following built-in types.
.. bro:type:: void
An internal Bro type representing an absence of a type. Should
most often be seen as a possible function return type.
.. bro:type:: bool
Reflects a value with one of two meanings: true or false. The two
``bool`` constants are ``T`` and ``F``.
.. bro:type:: int
A numeric type representing a signed integer. An ``int`` constant
is a string of digits preceded by a ``+`` or ``-`` sign, e.g.
``-42`` or ``+5``. When relying on type inference, take care that the
intended type is inferred, e.g. ``local size_difference = 0`` will
infer :bro:type:`count`, while ``local size_difference = +0``
will infer :bro:type:`int`.
.. bro:type:: count
A numeric type representing an unsigned integer. A ``count``
constant is a string of digits, e.g. ``1234`` or ``0``.
.. bro:type:: counter
An alias to :bro:type:`count`.
.. TODO: is there anything special about this type?
.. bro:type:: double
A numeric type representing a double-precision floating-point
number. Floating-point constants are written as a string of digits
with an optional decimal point, optional scale-factor in scientific
notation, and optional ``+`` or ``-`` sign. Examples are ``-1234``,
``-1234e0``, ``3.14159``, and ``.003e-23``.
.. bro:type:: time
A temporal type representing an absolute time. There is currently
no way to specify a ``time`` constant, but one can use the
:bro:id:`current_time` or :bro:id:`network_time` built-in functions
to assign a value to a ``time``-typed variable.
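As a small sketch, a ``time`` value might be captured and printed like
this (using the :bro:id:`network_time` built-in mentioned above; the
variable name is just illustrative):
.. code:: bro
local start: time = network_time();
print start;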
.. bro:type:: interval
A temporal type representing a relative time. An ``interval``
constant can be written as a numeric constant followed by a time
unit where the time unit is one of ``usec``, ``msec``, ``sec``, ``min``,
``hr``, or ``day`` which respectively represent microseconds, milliseconds,
seconds, minutes, hours, and days. Whitespace between the numeric
constant and time unit is optional. Appending the letter "s" to the
time unit in order to pluralize it is also optional (to no semantic
effect). Examples of ``interval`` constants are ``3.5 min`` and
``3.5mins``. An ``interval`` can also be negated, for example ``-
12 hr`` represents "twelve hours in the past". Intervals also
support addition, subtraction, multiplication, division, and
comparison operations.
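For illustration, a minimal sketch of interval addition and comparison:
.. code:: bro
local timeout = 30 secs + 1 min;
if ( timeout > 45 secs )
print "timeout exceeds 45 seconds";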
.. bro:type:: string
A type used to hold character-string values which represent text.
String constants are created by enclosing text in double quotes (")
and the backslash character (\\) introduces escape sequences.
Note that Bro represents strings internally as a count and vector of
bytes rather than a NUL-terminated byte string (although string
constants are also automatically NUL-terminated). This is because
network traffic can easily introduce NULs into strings either by
nature of an application, inadvertently, or maliciously. And while
NULs are allowed in Bro strings, when present in strings passed as
arguments to many functions, a run-time error can occur as their
presence likely indicates some sort of problem. In that case, the
string will also only be represented to the user as the literal
"<string-with-NUL>" string.
.. bro:type:: pattern
A type representing regular-expression patterns which can be used
for fast text-searching operations. Pattern constants are created
by enclosing text within forward slashes (/) and use the same syntax
as the patterns supported by the `flex lexical analyzer
<http://flex.sourceforge.net/manual/Patterns.html>`_. The speed of
regular expression matching does not depend on the complexity or
size of the patterns. Patterns support two types of matching, exact
and embedded.
In exact matching the ``==`` equality relational operator is used
with one :bro:type:`pattern` operand and one :bro:type:`string`
operand (order of operands does not matter) to check whether the full
string exactly matches the pattern. In exact matching, the ``^``
beginning-of-line and ``$`` end-of-line anchors are redundant since
the pattern is implicitly anchored to the beginning and end of the
line to facilitate an exact match. For example::
/foo|bar/ == "foo"
yields true, while::
/foo|bar/ == "foobar"
yields false. The ``!=`` operator would yield the negation of ``==``.
In embedded matching the ``in`` operator is used with one
:bro:type:`pattern` operand (which must be on the left-hand side) and
one :bro:type:`string` operand, but tests whether the pattern
appears anywhere within the given string. For example::
/foo|bar/ in "foobar"
yields true, while::
/^oob/ in "foobar"
is false since "oob" does not appear at the start of "foobar". The
``!in`` operator would yield the negation of ``in``.
.. bro:type:: enum
A type allowing the specification of a set of related values that
have no further structure. The only operations allowed on
enumerations are equality comparisons and they do not have
associated values or ordering. An example declaration:
.. code:: bro
type color: enum { Red, White, Blue, };
The last comma after ``Blue`` is optional.
.. bro:type:: timer
.. TODO: is this a type that's exposed to users?
.. bro:type:: port
A type representing transport-level port numbers. Besides TCP and
UDP ports, there is a concept of an ICMP "port" where the source
port is the ICMP message type and the destination port the ICMP
message code. A ``port`` constant is written as an unsigned integer
followed by one of ``/tcp``, ``/udp``, ``/icmp``, or ``/unknown``.
Ports can be compared for equality and also for ordering. When
comparing order across transport-level protocols, ``unknown`` <
``tcp`` < ``udp`` < ``icmp``, for example ``65535/tcp`` is smaller
than ``0/udp``.
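A brief sketch of the ordering described above (names are illustrative):
.. code:: bro
local https: port = 443/tcp;
if ( https < 0/udp )
print "every tcp port orders below every udp port";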
.. bro:type:: addr
A type representing an IP address.
IPv4 address constants are written in "dotted quad" format,
``A1.A2.A3.A4``, where Ai all lie between 0 and 255.
IPv6 address constants are written in the colon-separated hexadecimal
form described by :rfc:`2373`, but additionally encased in square brackets.
The mixed notation with embedded IPv4 addresses as dotted-quads in the
lower 32 bits is also allowed.
Some examples: ``[2001:db8::1]``, ``[::ffff:192.168.1.100]``, or
``[aaaa:bbbb:cccc:dddd:eeee:ffff:1111:2222]``.
Hostname constants can also be used, but since a hostname can
correspond to multiple IP addresses, the type of such a variable is a
:bro:type:`set` of :bro:type:`addr` elements. For example:
.. code:: bro
local a = www.google.com;
Addresses can be compared for (in)equality using ``==`` and ``!=``.
They can also be masked with ``/`` to produce a :bro:type:`subnet`:
.. code:: bro
local a: addr = 192.168.1.100;
local s: subnet = 192.168.0.0/16;
if ( a/16 == s )
print "true";
And they can be checked for inclusion within a :bro:type:`subnet` using ``in``:
.. code:: bro
local a: addr = 192.168.1.100;
local s: subnet = 192.168.0.0/16;
if ( a in s )
print "true";
.. bro:type:: subnet
A type representing a block of IP addresses in CIDR notation. A
``subnet`` constant is written as an :bro:type:`addr` followed by a
slash (/) and then the network prefix size specified as a decimal
number. For example, ``192.168.0.0/16`` or ``[fe80::]/64``.
.. bro:type:: any
Used to bypass strong typing. For example, a function can take an
argument of type ``any`` when it may be of different types.
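For example, a minimal sketch of a function whose argument may be of
different types (the function name is just a placeholder):
.. code:: bro
# Accepts a value of any type and prints it.
function show(x: any)
{
print x;
}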
.. bro:type:: table
An associative array that maps from one set of values to another. The
values being mapped are termed the *index* or *indices* and the
result of the mapping is called the *yield*. Indexing into tables
is very efficient, and internally it is just a single hash table
lookup.
The table declaration syntax is::
table [ type^+ ] of type
where *type^+* is one or more types, separated by commas. For example:
.. code:: bro
global a: table[count] of string;
declares a table indexed by :bro:type:`count` values and yielding
:bro:type:`string` values. The yield type can also be more complex:
.. code:: bro
global a: table[count] of table[addr, port] of string;
which declares a table indexed by :bro:type:`count` and yielding
another :bro:type:`table` which is indexed by an :bro:type:`addr`
and :bro:type:`port` to yield a :bro:type:`string`.
Initialization of tables occurs by enclosing a set of initializers within
braces, for example:
.. code:: bro
global t: table[count] of string = {
[11] = "eleven",
[5] = "five",
};
Accessing table elements is done by enclosing values within square
brackets (``[]``), for example:
.. code:: bro
t[13] = "thirteen";
And membership can be tested with ``in``:
.. code:: bro
if ( 13 in t )
...
Iterate over tables with a ``for`` loop:
.. code:: bro
local t: table[count] of string;
for ( n in t )
...
local services: table[addr, port] of string;
for ( [a, p] in services )
...
Remove individual table elements with ``delete``:
.. code:: bro
delete t[13];
Nothing happens if the element with value ``13`` isn't present in
the table.
Table size can be obtained by placing the table identifier between
vertical pipe (|) characters:
.. code:: bro
|t|
.. bro:type:: set
A set is like a :bro:type:`table`, but it is a collection of indices
that do not map to any yield value. They are declared with the
syntax::
set [ type^+ ]
where *type^+* is one or more types separated by commas.
Sets are initialized by listing elements enclosed by curly braces:
.. code:: bro
global s: set[port] = { 21/tcp, 23/tcp, 80/tcp, 443/tcp };
global s2: set[port, string] = { [21/tcp, "ftp"], [23/tcp, "telnet"] };
The types are explicitly shown in the example above, but they could
have been left to type inference.
Set membership is tested with ``in``:
.. code:: bro
if ( 21/tcp in s )
...
Elements are added with ``add``:
.. code:: bro
add s[22/tcp];
And removed with ``delete``:
.. code:: bro
delete s[21/tcp];
Set size can be obtained by placing the set identifier between
vertical pipe (|) characters:
.. code:: bro
|s|
.. bro:type:: vector
A vector is like a :bro:type:`table`, except it's always indexed by a
:bro:type:`count`. A vector is declared like:
.. code:: bro
global v: vector of string;
And can be initialized with the vector constructor:
.. code:: bro
global v: vector of string = vector("one", "two", "three");
Adding an element to a vector involves accessing/assigning it:
.. code:: bro
v[3] = "four"
Note that vector indexing is 0-based, so the assignment above appends a fourth element at index 3.
Vector size can be obtained by placing the vector identifier between
vertical pipe (|) characters:
.. code:: bro
|v|
.. bro:type:: record
A ``record`` is a collection of values. Each value has a field name
and a type. Values do not need to have the same type and the types
have no restrictions. An example record type definition:
.. code:: bro
type MyRecordType: record {
c: count;
s: string &optional;
};
Access to a record field uses the dollar sign (``$``) operator:
.. code:: bro
global r: MyRecordType;
r$c = 13;
Record assignment can be done field by field or as a whole like:
.. code:: bro
r = [$c = 13, $s = "thirteen"];
When assigning a whole record value, all fields that are neither
:bro:attr:`&optional` nor carry a :bro:attr:`&default` attribute must
be specified.
To test for existence of a field that is :bro:attr:`&optional`, use the
``?$`` operator:
.. code:: bro
if ( r?$s )
...
.. bro:type:: file
Bro supports writing to files, but not reading from them. For
example, declare, open, and write to a file and finally close it
like:
.. code:: bro
global f: file = open("myfile");
print f, "hello, world";
close(f);
Writing to files like this for logging usually isn't recommended; for
better logging support, see :doc:`/logging`.
.. bro:type:: func
See :bro:type:`function`.
.. bro:type:: function
Function types in Bro are declared using::
function( argument* ): type
where *argument* is a (possibly empty) comma-separated list of
arguments, and *type* is an optional return type. For example:
.. code:: bro
global greeting: function(name: string): string;
Here ``greeting`` is an identifier with a certain function type.
The function body is not defined yet and ``greeting`` could even
have different function body values at different times. To define
a function including a body value, the syntax is like:
.. code:: bro
function greeting(name: string): string
{
return "Hello, " + name;
}
Note that the definition above does not require the preceding
(forward) declaration of ``greeting`` as a function type, but when
such a declaration is present, the argument list and return type must
match exactly.
Function types don't need to have a name and can be assigned anonymously:
.. code:: bro
greeting = function(name: string): string { return "Hi, " + name; };
And finally, the function can be called like:
.. code:: bro
print greeting("Dave");
.. bro:type:: event
Event handlers are nearly identical in both syntax and semantics to
a :bro:type:`function`, with the two differences being that event
handlers have no return type since they never return a value, and
you cannot call an event handler. Instead of directly calling an
event handler from a script, event handler bodies are executed when
they are invoked by one of three different methods:
- From the event engine
When the event engine detects an event for which you have
defined a corresponding event handler, it queues an event for
that handler. The handler is invoked as soon as the event
engine finishes processing the current packet and has flushed the
invocations of other event handlers that were queued first.
- With the ``event`` statement from a script
An event handler can be queued for immediate invocation like:
.. code:: bro
event password_exposed(user, password);
This assumes that ``password_exposed`` was previously declared
as an event handler type with compatible arguments.
- Via the ``schedule`` expression in a script
This delays the invocation of event handlers until some time in
the future. For example:
.. code:: bro
schedule 5 secs { password_exposed(user, password) };
Multiple event handler bodies can be defined for the same event handler
identifier and the body of each will be executed in turn. Ordering
of execution can be influenced with :bro:attr:`&priority`.
Attributes
----------
Attributes occur at the end of type/event declarations and change their
behavior. The syntax is ``&key`` or ``&key=val``, e.g., ``type T:
set[count] &read_expire=5min`` or ``event foo() &priority=-3``. The Bro
scripting language supports the following built-in attributes.
.. bro:attr:: &optional
Allows a record field to be missing. For example, the type ``record {
a: addr; b: port &optional; }`` could be instantiated either as the
singleton ``[$a=127.0.0.1]`` or as the pair ``[$a=127.0.0.1, $b=80/tcp]``.
.. bro:attr:: &default
Uses a default value for a record field or container elements. For
example, ``table[int] of string &default="foo"`` would create a
table that returns the :bro:type:`string` ``"foo"`` for any
non-existing index.
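A short sketch of that behavior (the variable name is illustrative):
.. code:: bro
global lookup: table[int] of string &default="foo";
# Accessing a missing index yields the default value instead of an error.
print lookup[42];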
.. bro:attr:: &redef
Allows for redefinition of initial object values. This is typically
used with constants, for example, ``const clever = T &redef;`` would
allow the constant to be redefined at some later point during script
execution.
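For instance, a minimal sketch of how a later script could then change
the initial value:
.. code:: bro
const clever = T &redef;
# Elsewhere, possibly in another script:
redef clever = F;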
.. bro:attr:: &rotate_interval
Rotates a file after a specified interval.
.. bro:attr:: &rotate_size
Rotates a file after it has reached a given size in bytes.
.. bro:attr:: &add_func
.. TODO: needs to be documented.
.. bro:attr:: &delete_func
.. TODO: needs to be documented.
.. bro:attr:: &expire_func
Called right before a container element expires. The function's
first parameter is of the same type as the container, and the second
parameter is of the same type as the container's index. The return
value is an :bro:type:`interval` indicating the amount of additional
time to wait before expiring the container element at the given
index (which will trigger another execution of this function).
.. bro:attr:: &read_expire
Specifies a read expiration timeout for container elements. That is,
the element expires after the given amount of time since the last
time it has been read. Note that a write also counts as a read.
.. bro:attr:: &write_expire
Specifies a write expiration timeout for container elements. That
is, the element expires after the given amount of time since the
last time it has been written.
.. bro:attr:: &create_expire
Specifies a creation expiration timeout for container elements. That
is, the element expires after the given amount of time since it has
been inserted into the container, regardless of any reads or writes.
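As a sketch, combining :bro:attr:`&create_expire` with an
:bro:attr:`&expire_func` that follows the signature described above
(the table and function names are just placeholders):
.. code:: bro
function extend(t: table[string] of count, idx: string): interval
{
# Keep the element around for another 30 seconds before expiring it.
return 30 secs;
}
global requests: table[string] of count &create_expire=5min &expire_func=extend;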
.. bro:attr:: &persistent
Makes a variable persistent, i.e., its value is written to disk (by
default at shutdown time).
.. bro:attr:: &synchronized
Synchronizes variable accesses across nodes. The value of a
``&synchronized`` variable is automatically propagated to all peers
when it changes.
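For example, a sketch of a variable kept in sync across communicating
peers (the variable name is illustrative):
.. code:: bro
# Updates from any peer propagate to all other peers.
global known_scanners: set[addr] &synchronized;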
.. bro:attr:: &postprocessor
.. TODO: needs to be documented.
.. bro:attr:: &encrypt
Encrypts files right before writing them to disk.
.. TODO: needs to be documented in more detail.
.. bro:attr:: &match
.. TODO: needs to be documented.
.. bro:attr:: &raw_output
Opens a file in raw mode, i.e., non-ASCII characters are not
escaped.
.. bro:attr:: &mergeable
Prefers set union to assignment for synchronized state. This
attribute is used in conjunction with :bro:attr:`&synchronized`
container types: when the same container is updated at two peers
with different values, the propagation of the state causes a race
condition in which only the last update takes effect. This can cause
inconsistencies, which can be avoided by unifying the two sets rather
than merely overwriting the old value.
.. bro:attr:: &priority
Specifies the execution priority of an event handler. Higher values
are executed before lower ones. The default value is 0.
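As a sketch, using the standard ``bro_init`` event:
.. code:: bro
event bro_init() &priority=10
{
# Runs before bro_init handlers left at the default priority of 0.
print "early initialization";
}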
.. bro:attr:: &group
Groups event handlers such that those in the same group can be
jointly activated or deactivated.
.. bro:attr:: &log
Writes a record field to the associated log stream.
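For instance, a sketch of a record type whose fields are all marked for
logging by attaching ``&log`` to the whole record (the field names are
illustrative; associating the record with a stream is done via the
logging framework):
.. code:: bro
type Info: record {
ts: time;
message: string;
} &log;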
.. bro:attr:: &error_handler
.. TODO: needs to be documented.
.. bro:attr:: (&tracked)
.. TODO: needs to be documented, or removed if it's not used anywhere.

View file

@ -1,10 +1,20 @@
##! This is an example script that demonstrates how to document. Comments
##! of the form ``##!`` are for the script summary. The contents of
##! This is an example script that demonstrates documentation features.
##! Comments of the form ``##!`` are for the script summary. The contents of
##! these comments are transferred directly into the auto-generated
##! `reStructuredText <http://docutils.sourceforge.net/rst.html>`_
##! (reST) document's summary section.
##!
##! .. tip:: You can embed directives and roles within ``##``-stylized comments.
##!
##! There's also a custom role to reference any identifier node in
##! the Bro Sphinx domain that's good for "see alsos", e.g.
##!
##! See also: :bro:see:`Example::a_var`, :bro:see:`Example::ONE`,
##! :bro:see:`SSH::Info`
##!
##! And a custom directive does the equivalent references:
##!
##! .. bro:see:: Example::a_var Example::ONE SSH::Info
# Comments that use a single pound sign (#) are not significant to
# a script's auto-generated documentation, but ones that use a
@ -12,8 +22,8 @@
# field comments, it's necessary to disambiguate the field with
# which a comment associates: e.g. "##<" can be used on the same line
# as a field to signify the comment relates to it and not the
# following field. "##<" is not meant for general use, just
# record/enum fields.
# following field. "##<" can also be used more generally in any
# variable declarations to associate with the last-declared identifier.
#
# Generally, the auto-doc comments (##) are associated with the
# next declaration/identifier found in the script, but the doc framework
@ -141,7 +151,7 @@ export {
const an_option: set[addr, addr, string] &redef;
# default initialization will be self-documenting
const option_with_init = 0.01 secs &redef;
const option_with_init = 0.01 secs &redef; ##< More docs can be added here.
############## state variables ############
# right now, I'm defining this as any global
@ -173,6 +183,7 @@ export {
## Summarize "an_event" here.
## Give more details about "an_event" here.
## Example::an_event should not be confused as a parameter.
## name: describe the argument here
global an_event: event(name: string);

8
doc/scripts/index.rst Normal file
View file

@ -0,0 +1,8 @@
.. This is a stub doc to which broxygen appends during the build process
Index of All Individual Bro Scripts
===================================
.. toctree::
:maxdepth: 1

5
doc/scripts/internal.rst Normal file
View file

@ -0,0 +1,5 @@
.. This is a stub doc to which broxygen appends during the build process
Internal Scripts
================

View file

@ -1,7 +1,7 @@
.. This is a stub doc to which the build process can append.
.. This is a stub doc to which broxygen appends during the build process
Bro Script Packages
===================
Index of All Bro Script Packages
================================
Bro has the following script packages (e.g. collections of related scripts in
a common directory). If the package directory contains a ``__load__.bro``
@ -10,8 +10,3 @@ script, it supports being loaded in mass as a whole directory for convenience.
Packages/scripts in the ``base/`` directory are all loaded by default, while
ones in ``policy/`` provide functionality and customization options that are
more appropriate for users to decide whether they'd like to load it or not.
.. toctree::
:maxdepth: 1

View file

@ -1,64 +0,0 @@
// make literal blocks corresponding to identifier initial values
// hidden by default
$(document).ready(function() {
var showText='(Show Value)';
var hideText='(Hide Value)';
var is_visible = false;
// select field-list tables that come before a literal block
tables = $('.highlight-python').prev('table.docutils.field-list');
tables.find('th.field-name').filter(function(index) {
return $(this).html() == "Default :";
}).next().append('<a href="#" class="toggleLink">'+showText+'</a>');
// hide all literal blocks that follow a field-list table
tables.next('.highlight-python').hide();
// register handler for clicking a "toggle" link
$('a.toggleLink').click(function() {
is_visible = !is_visible;
$(this).html( (!is_visible) ? showText : hideText);
// the link is inside a <table><tbody><tr><td> and the next
// literal block after the table is the literal block that we want
// to show/hide
$(this).parent().parent().parent().parent().next('.highlight-python').slideToggle('fast');
// override default link behavior
return false;
});
});
// make "Private Interface" sections hidden by default
$(document).ready(function() {
var showText='Show Private Interface (for internal use)';
var hideText='Hide Private Interface';
var is_visible = false;
// insert show/hide links
$('#private-interface').children(":first-child").after('<a href="#" class="privateToggle">'+showText+'</a>');
// wrap all sub-sections in a new div that can be hidden/shown
$('#private-interface').children(".section").wrapAll('<div class="private" />');
// hide the given class
$('.private').hide();
// register handler for clicking a "toggle" link
$('a.privateToggle').click(function() {
is_visible = !is_visible;
$(this).html( (!is_visible) ? showText : hideText);
$('.private').slideToggle('fast');
// override default link behavior
return false;
});
});

View file

@ -1,5 +0,0 @@
{% extends "!layout.html" %}
{% block extrahead %}
<script type="text/javascript" src="{{ pathto('_static/showhide.js', 1) }}"></script>
{{ super() }}
{% endblock %}

View file

@ -1,5 +0,0 @@
.. This is a stub doc to which the build process can append.
Built-In Functions (BIFs)
=========================

View file

@ -1,118 +0,0 @@
Builtin Types and Attributes
============================
Types
-----
The Bro scripting language supports the following built-in types.
.. TODO: add documentation
.. bro:type:: void
.. bro:type:: bool
.. bro:type:: int
.. bro:type:: count
.. bro:type:: counter
.. bro:type:: double
.. bro:type:: time
.. bro:type:: interval
.. bro:type:: string
.. bro:type:: pattern
.. bro:type:: enum
.. bro:type:: timer
.. bro:type:: port
.. bro:type:: addr
.. bro:type:: net
.. bro:type:: subnet
.. bro:type:: any
.. bro:type:: table
.. bro:type:: union
.. bro:type:: record
.. bro:type:: types
.. bro:type:: func
.. bro:type:: file
.. bro:type:: vector
.. TODO: below are kind of "special cases" that bro knows about?
.. bro:type:: set
.. bro:type:: function
.. bro:type:: event
Attributes
----------
The Bro scripting language supports the following built-in attributes.
.. TODO: add documentation
.. bro:attr:: &optional
.. bro:attr:: &default
.. bro:attr:: &redef
.. bro:attr:: &rotate_interval
.. bro:attr:: &rotate_size
.. bro:attr:: &add_func
.. bro:attr:: &delete_func
.. bro:attr:: &expire_func
.. bro:attr:: &read_expire
.. bro:attr:: &write_expire
.. bro:attr:: &create_expire
.. bro:attr:: &persistent
.. bro:attr:: &synchronized
.. bro:attr:: &postprocessor
.. bro:attr:: &encrypt
.. bro:attr:: &match
.. bro:attr:: &disable_print_hook
.. bro:attr:: &raw_output
.. bro:attr:: &mergeable
.. bro:attr:: &priority
.. bro:attr:: &group
.. bro:attr:: &log
.. bro:attr:: (&tracked)

View file

@ -1,19 +0,0 @@
Common Documentation
====================
.. _common_port_analysis_doc:
Port Analysis
-------------
TODO: add some stuff here
.. _common_packet_filter_doc:
Packet Filter
-------------
TODO: add some stuff here
.. note:: Filters are only relevant when dynamic protocol detection (DPD)
is explicitly turned off (Bro release 1.6 enabled DPD by default).

View file

@ -1,23 +0,0 @@
.. Bro documentation master file
Welcome to Bro's documentation!
===============================
Contents:
.. toctree::
:maxdepth: 1
:glob:
common
builtins
internal
bifs
packages
scripts/index
Indices and tables
==================
* :ref:`genindex`
* :ref:`search`

View file

@ -1,5 +0,0 @@
.. This is a stub doc to which the build process can append.
Internal Scripts
================

View file

@ -1,6 +0,0 @@
Index of All Bro Script Documentation
=====================================
.. toctree::
:maxdepth: 1

View file

@ -3,7 +3,7 @@
Signatures
==========
.. class:: opening
.. rst-class:: opening
Bro relies primarily on its extensive scripting language for
defining and analyzing detection policies. In addition, however,
@ -34,7 +34,7 @@ Let's look at an example signature first:
This signature asks Bro to match the regular expression ``.*root`` on
all TCP connections going to port 80. When the signature triggers, Bro
will raise an event ``signature_match`` of the form:
will raise an event :bro:id:`signature_match` of the form:
.. code:: bro
@ -45,19 +45,24 @@ triggered the match, ``msg`` is the string specified by the
signature's event statement (``Found root!``), and data is the last
piece of payload which triggered the pattern match.
To turn such ``signature_match`` events into actual alarms, you can
load Bro's ``signature.bro`` script. This script contains a default
event handler that raises ``SensitiveSignature`` `Notices
<notices.html>`_ (as well as others; see the beginning of the script).
As signatures are independent of Bro's policy scripts, they are put
into their own file(s). There are two ways to specify which files
contain signatures: By using the ``-s`` flag when you invoke Bro, or
by extending the Bro variable ``signatures_files`` using the ``+=``
operator. If a signature file is given without a path, it is searched
along the normal ``BROPATH``. The default extension of the file name
is ``.sig``, and Bro appends that automatically when neccesary.
To turn such :bro:id:`signature_match` events into actual alarms, you can
load Bro's :doc:`/scripts/base/frameworks/signatures/main` script.
This script contains a default event handler that raises
:bro:enum:`Signatures::Sensitive_Signature` :doc:`Notices <notice>`
(as well as others; see the beginning of the script).
As signatures are independent of Bro's policy scripts, they are put into
their own file(s). There are three ways to specify which files contain
signatures: By using the ``-s`` flag when you invoke Bro, or by
extending the Bro variable :bro:id:`signature_files` using the ``+=``
operator, or by using the ``@load-sigs`` directive inside a Bro script.
If a signature file is given without a full path, it is searched for
along the normal ``BROPATH``. Additionally, the ``@load-sigs``
directive can be used to load signature files in a path relative to the
Bro script in which it's placed, e.g. ``@load-sigs ./mysigs.sig`` will
expect that signature file in the same directory as the Bro script. The
default extension of the file name is ``.sig``, and Bro appends that
automatically when necessary.
Signature language
==================
@ -78,9 +83,8 @@ Header Conditions
~~~~~~~~~~~~~~~~~
Header conditions limit the applicability of the signature to a subset
of traffic that contains matching packet headers. For TCP, this match
is performed only for the first packet of a connection. For other
protocols, it is done on each individual packet.
of traffic that contains matching packet headers. This type of matching
is performed only for the first packet of a connection.
There are pre-defined header conditions for some of the most used
header fields. All of them generally have the format ``<keyword> <cmp>
@ -90,14 +94,22 @@ one of ``==``, ``!=``, ``<``, ``<=``, ``>``, ``>=``; and
against. The following keywords are defined:
``src-ip``/``dst-ip <cmp> <address-list>``
Source and destination address, repectively. Addresses can be
given as IP addresses or CIDR masks.
Source and destination address, respectively. Addresses can be given
as IPv4 or IPv6 addresses or CIDR masks. For IPv6 addresses/masks
the colon-hexadecimal representation of the address must be enclosed
in square brackets (e.g. ``[fe80::1]`` or ``[fe80::0]/16``).
``src-port``/``dst-port`` ``<int-list>``
Source and destination port, repectively.
``src-port``/``dst-port <cmp> <int-list>``
Source and destination port, respectively.
``ip-proto tcp|udp|icmp``
IP protocol.
``ip-proto <cmp> tcp|udp|icmp|icmp6|ip|ip6``
IPv4 header's Protocol field or the Next Header field of the final
IPv6 header (i.e. either Next Header field in the fixed IPv6 header
if no extension headers are present or that field from the last
extension header in the chain). Note that the IP-in-IP forms of
tunneling are automatically decapsulated by default and signatures
apply to only the inner-most packet, so specifying ``ip`` or ``ip6``
is a no-op.
For lists of multiple values, they are sequentially compared against
the corresponding header field. If at least one of the comparisons
@ -111,30 +123,32 @@ condition can be defined either as
header <proto>[<offset>:<size>] [& <integer>] <cmp> <value-list>
This compares the value found at the given position of the packet
header with a list of values. ``offset`` defines the position of the
value within the header of the protocol defined by ``proto`` (which
can be ``ip``, ``tcp``, ``udp`` or ``icmp``). ``size`` is either 1, 2,
or 4 and specifies the value to have a size of this many bytes. If the
optional ``& <integer>`` is given, the packet's value is first masked
with the integer before it is compared to the value-list. ``cmp`` is
one of ``==``, ``!=``, ``<``, ``<=``, ``>``, ``>=``. ``value-list`` is
a list of comma-separated integers similar to those described above.
The integers within the list may be followed by an additional ``/
mask`` where ``mask`` is a value from 0 to 32. This corresponds to the
CIDR notation for netmasks and is translated into a corresponding
bitmask applied to the packet's value prior to the comparison (similar
to the optional ``& integer``).
This compares the value found at the given position of the packet header
with a list of values. ``offset`` defines the position of the value
within the header of the protocol defined by ``proto`` (which can be
``ip``, ``ip6``, ``tcp``, ``udp``, ``icmp`` or ``icmp6``). ``size`` is
either 1, 2, or 4 and specifies the value to have a size of this many
bytes. If the optional ``& <integer>`` is given, the packet's value is
first masked with the integer before it is compared to the value-list.
``cmp`` is one of ``==``, ``!=``, ``<``, ``<=``, ``>``, ``>=``.
``value-list`` is a list of comma-separated integers similar to those
described above. The integers within the list may be followed by an
additional ``/ mask`` where ``mask`` is a value from 0 to 32. This
corresponds to the CIDR notation for netmasks and is translated into a
corresponding bitmask applied to the packet's value prior to the
comparison (similar to the optional ``& integer``). IPv6 address values
are not allowed in the value-list, though you can still inspect any 1,
2, or 4 byte section of an IPv6 header using this keyword.
Putting all together, this is an example conditiation that is
equivalent to ``dst- ip == 1.2.3.4/16, 5.6.7.8/24``:
Putting it all together, this is an example condition that is
equivalent to ``dst-ip == 1.2.3.4/16, 5.6.7.8/24``:
.. code:: bro-sig
header ip[16:4] == 1.2.3.4/16, 5.6.7.8/24
Internally, the predefined header conditions are in fact just
short-cuts and mappend into a generic condition.
Note that the analogous example for IPv6 isn't currently possible since
4 bytes is the max width of a value that can be compared.
Content Conditions
~~~~~~~~~~~~~~~~~~
@ -143,7 +157,7 @@ Content conditions are defined by regular expressions. We
differentiate two kinds of content conditions: first, the expression
may be declared with the ``payload`` statement, in which case it is
matched against the raw payload of a connection (for reassembled TCP
streams) or of a each packet (for ICMP, UDP, and non-reassembled TCP).
streams) or of each packet (for ICMP, UDP, and non-reassembled TCP).
Second, it may be prefixed with an analyzer-specific label, in which
case the expression is matched against the data as extracted by the
corresponding analyzer.
@ -208,7 +222,7 @@ To define dependencies between signatures, there are two conditions:
``requires-reverse-signature [!] <id>``
Similar to ``requires-signature``, but ``id`` has to match for the
opposite direction of the same connection, compared the current
opposite direction of the same connection, compared to the current
signature. This allows modeling the notion of requests and
replies.
@ -224,20 +238,10 @@ matched. The following context conditions are defined:
confirming the match. If false is returned, no signature match is
going to be triggered. The function has to be of type ``function
cond(state: signature_state, data: string): bool``. Here,
``content`` may contain the most recent content chunk available at
``data`` may contain the most recent content chunk available at
the time the signature was matched. If no such chunk is available,
``content`` will be the empty string. ``signature_state`` is
defined as follows:
.. code:: bro
type signature_state: record {
id: string; # ID of the signature
conn: connection; # Current connection
is_orig: bool; # True if current endpoint is originator
payload_size: count; # Payload size of the first packet
};
``data`` will be the empty string. See :bro:type:`signature_state`
for its definition.
``payload-size <cmp> <integer>``
Compares the integer to the size of the payload of a packet. For
@ -265,7 +269,7 @@ Actions define what to do if a signature matches. Currently, there are
two actions defined:
``event <string>``
Raises a ``signature_match`` event. The event handler has the
Raises a :bro:id:`signature_match` event. The event handler has the
following type:
.. code:: bro
@ -338,11 +342,11 @@ Things to keep in mind when writing signatures
signature engine and can be matched with ``\r`` and ``\n``,
respectively. Generally, Bro follows `flex's regular expression
syntax
<http://www.gnu.org/software/flex/manual/html_chapter/flex_7.html>`_.
See the DPD signatures in ``policy/sigs/dpd.bro`` for some examples
<http://flex.sourceforge.net/manual/Patterns.html>`_.
See the DPD signatures in ``base/frameworks/dpd/dpd.sig`` for some examples
of fairly complex payload patterns.
* The data argument of the ``signature_match`` handler might not carry
* The data argument of the :bro:id:`signature_match` handler might not carry
the full text matched by the regular expression. Bro performs the
matching incrementally as packets come in; when the signature
eventually fires, it can only pass on the most recent chunk of data.

View file

@ -3,7 +3,7 @@
Upgrading From Bro 1.5 to 2.0
=============================
.. class:: opening
.. rst-class:: opening
This guide details differences between Bro versions 1.5 and 2.0
that may be important for users to know as they work on updating
@ -39,7 +39,7 @@ version. The two rules of thumb are:
if you need help.
Below we summarize changes from 1.x to 2.x in more detail. This list
isn't complete, see the `CHANGES <{{git('bro:CHANGES', 'txt')}}>`_ file in the
isn't complete, see the :download:`CHANGES <CHANGES>` file in the
distribution for the full story.
Default Scripts
@ -55,13 +55,13 @@ renamed to ``scripts/`` and contains major subdirectories ``base/``,
further.
The contents of the new ``scripts/`` directory, like the old/flat
``policy/`` still gets installed under under the ``share/bro``
``policy/`` still gets installed under the ``share/bro``
subdirectory of the installation prefix path just like previous
versions. For example, if Bro was compiled like ``./configure
--prefix=/usr/local/bro && make && make install``, then the script
hierarchy can be found in ``/usr/local/bro/share/bro``.
THe main
The main
subdirectories of that hierarchy are as follows:
- ``base/`` contains all scripts that are loaded by Bro by default
@ -131,8 +131,8 @@ Logging Framework
endpoint.
- The new logging framework makes it possible to extend, customize,
and filter logs very easily. See `the logging framework
<{{git('bro:doc/logging.rst')}}>`_ more information on usage.
and filter logs very easily. See the :doc:`logging framework <logging>`
for more information on usage.
- A common pattern found in the new scripts is to store logging stream
records for protocols inside the ``connection`` records so that
@ -155,8 +155,7 @@ Notice Framework
The way users interact with "notices" has changed significantly in
order to make it easier to define a site policy and more extensible
for adding customized actions. See the `the notice framework
<{{git('bro:doc/notice.rst')}}>`_.
for adding customized actions. See the :doc:`notice framework <notice>`.
New Default Settings
@ -169,10 +168,6 @@ New Default Settings
are loaded. See ``PacketFilter::all_packets`` for how to revert to old
behavior.
- By default, Bro now sets a libpcap snaplen of 65535. Depending on
the OS, this may have performance implications and you can use the
``--snaplen`` option to change the value.
API Changes
-----------
@ -198,7 +193,7 @@ Variable Naming
- Identifiers may have been renamed to conform to new `scripting
conventions
<{{docroot}}/development/script-conventions.html>`_
<http://www.bro-ids.org/development/script-conventions.html>`_
BroControl
@ -214,8 +209,8 @@ live analysis.
BroControl now has an extensive plugin interface for adding new
commands and options. Note that this is still considered experimental.
We have remove the ``analysis`` command, and BroControl does currently
not not send daily alarm summaries anymore (this may be restored
We have removed the ``analysis`` command, and BroControl currently
does not send daily alarm summaries anymore (this may be restored
later).
Removed Functionality
@ -238,11 +233,11 @@ Development Infrastructure
==========================
Bro development has moved from using SVN to Git for revision control.
Users that like to use the latest Bro developments by checking it out
Users that want to use the latest Bro development snapshot by checking it out
from the source repositories should see the `development process
<{{docroot}}/development/process.html>`_. Note that all the various
sub-components now reside on their own repositories. However, the
top-level Bro repository includes them as git submodules so it's easu
<http://www.bro-ids.org/development/process.html>`_. Note that all the various
sub-components now reside in their own repositories. However, the
top-level Bro repository includes them as git submodules so it's easy
to check them all out simultaneously.
Bro now uses `CMake <http://www.cmake.org>`_ for its build system so

View file

@ -5,7 +5,7 @@
# version of CMake is required to obtain consistency, but can be increased
# as new versions of CMake come out that also produce working packages.
CMAKE_PACK_REQ="cmake version 2.8.4"
CMAKE_PACK_REQ="cmake version 2.8.6"
CMAKE_VER=`cmake -version`
if [ "${CMAKE_VER}" != "${CMAKE_PACK_REQ}" ]; then

View file

@ -27,21 +27,21 @@ cd ..
# Minimum Bro
./configure --prefix=${prefix} --disable-broccoli --disable-broctl \
--pkg-name-prefix=Bro --binary-package
--pkg-name-prefix=Bro-minimal --binary-package
( cd build && make package )
# Full Bro package
./configure --prefix=${prefix} --pkg-name-prefix=Bro-all --binary-package
./configure --prefix=${prefix} --pkg-name-prefix=Bro --binary-package
( cd build && make package )
# Broccoli
cd aux/broccoli
./configure --prefix=${prefix} --binary-package
( cd build && make package && mv Broccoli*.deb ../../../build/ )
( cd build && make package && mv *.deb ../../../build/ )
cd ../..
# Broctl
cd aux/broctl
./configure --prefix=${prefix} --binary-package
( cd build && make package && mv Broctl*.deb ../../../build/ )
( cd build && make package && mv *.deb ../../../build/ )
cd ../..

View file

@ -3,7 +3,13 @@
# This script creates binary packages for Mac OS X.
# They can be found in ../build/ after running.
./check-cmake || { exit 1; }
cmake -P /dev/stdin << "EOF"
if ( ${CMAKE_VERSION} VERSION_LESS 2.8.9 )
message(FATAL_ERROR "CMake >= 2.8.9 required to build package")
endif ()
EOF
[ $? -ne 0 ] && exit 1;
type sw_vers > /dev/null 2>&1 || {
echo "Unable to get Mac OS X version" >&2;
@ -34,26 +40,26 @@ prefix=/opt/bro
cd ..
# Minimum Bro
CMAKE_OSX_ARCHITECTURES=${arch} ./configure --prefix=${prefix} \
--disable-broccoli --disable-broctl --pkg-name-prefix=Bro \
CMAKE_PREFIX_PATH=/usr CMAKE_OSX_ARCHITECTURES=${arch} ./configure --prefix=${prefix} \
--disable-broccoli --disable-broctl --pkg-name-prefix=Bro-minimal \
--binary-package
( cd build && make package )
# Full Bro package
CMAKE_OSX_ARCHITECTURES=${arch} ./configure --prefix=${prefix} \
--pkg-name-prefix=Bro-all --binary-package
CMAKE_PREFIX_PATH=/usr CMAKE_OSX_ARCHITECTURES=${arch} ./configure --prefix=${prefix} \
--pkg-name-prefix=Bro --binary-package
( cd build && make package )
# Broccoli
cd aux/broccoli
CMAKE_OSX_ARCHITECTURES=${arch} ./configure --prefix=${prefix} \
CMAKE_PREFIX_PATH=/usr CMAKE_OSX_ARCHITECTURES=${arch} ./configure --prefix=${prefix} \
--binary-package
( cd build && make package && mv Broccoli*.dmg ../../../build/ )
( cd build && make package && mv *.dmg ../../../build/ )
cd ../..
# Broctl
cd aux/broctl
CMAKE_OSX_ARCHITECTURES=${arch} ./configure --prefix=${prefix} \
CMAKE_PREFIX_PATH=/usr CMAKE_OSX_ARCHITECTURES=${arch} ./configure --prefix=${prefix} \
--binary-package
( cd build && make package && mv Broctl*.dmg ../../../build/ )
( cd build && make package && mv *.dmg ../../../build/ )
cd ../..

View file

@ -20,21 +20,21 @@ cd ..
# Minimum Bro
./configure --prefix=${prefix} --disable-broccoli --disable-broctl \
--pkg-name-prefix=Bro --binary-package
--pkg-name-prefix=Bro-minimal --binary-package
( cd build && make package )
# Full Bro package
./configure --prefix=${prefix} --pkg-name-prefix=Bro-all --binary-package
./configure --prefix=${prefix} --pkg-name-prefix=Bro --binary-package
( cd build && make package )
# Broccoli
cd aux/broccoli
./configure --prefix=${prefix} --binary-package
( cd build && make package && mv Broccoli*.rpm ../../../build/ )
( cd build && make package && mv *.rpm ../../../build/ )
cd ../..
# Broctl
cd aux/broctl
./configure --prefix=${prefix} --binary-package
( cd build && make package && mv Broctl*.rpm ../../../build/ )
( cd build && make package && mv *.rpm ../../../build/ )
cd ../..

View file

@ -9,10 +9,10 @@ redef peer_description = Cluster::node;
# Add a cluster prefix.
@prefixes += cluster
## If this script isn't found anywhere, the cluster bombs out.
## Loading the cluster framework requires that a script by this name exists
## somewhere in the BROPATH. The only thing in the file should be the
## cluster definition in the :bro:id:`Cluster::nodes` variable.
# If this script isn't found anywhere, the cluster bombs out.
# Loading the cluster framework requires that a script by this name exists
# somewhere in the BROPATH. The only thing in the file should be the
# cluster definition in the :bro:id:`Cluster::nodes` variable.
@load cluster-layout
@if ( Cluster::node in Cluster::nodes )
@ -28,17 +28,14 @@ redef Communication::listen_port = Cluster::nodes[Cluster::node]$p;
@if ( Cluster::local_node_type() == Cluster::MANAGER )
@load ./nodes/manager
@load site/local-manager
@endif
@if ( Cluster::local_node_type() == Cluster::PROXY )
@load ./nodes/proxy
@load site/local-proxy
@endif
@if ( Cluster::local_node_type() == Cluster::WORKER )
@load ./nodes/worker
@load site/local-worker
@endif
@endif

View file

@ -1,21 +1,45 @@
##! A framework for establishing and controlling a cluster of Bro instances.
##! In order to use the cluster framework, a script named
##! ``cluster-layout.bro`` must exist somewhere in Bro's script search path
##! which has a cluster definition of the :bro:id:`Cluster::nodes` variable.
##! The ``CLUSTER_NODE`` environment variable or :bro:id:`Cluster::node`
##! must also be sent and the cluster framework loaded as a package like
##! ``@load base/frameworks/cluster``.
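For illustration, a minimal ``cluster-layout.bro`` might look like the sketch below; the node names, addresses, ports, and interface are hypothetical, and each instance would then be started with the ``CLUSTER_NODE`` environment variable set to one of the table keys.
# cluster-layout.bro -- minimal sketch; all hosts, ports, and names are
# hypothetical. As noted earlier, this file should contain nothing but the
# Cluster::nodes definition.
redef Cluster::nodes = {
	["manager"]  = [$node_type=Cluster::MANAGER, $ip=192.168.1.1, $p=47761/tcp,
	                $workers=set("worker-1")],
	["proxy-1"]  = [$node_type=Cluster::PROXY,   $ip=192.168.1.1, $p=47762/tcp,
	                $manager="manager", $workers=set("worker-1")],
	["worker-1"] = [$node_type=Cluster::WORKER,  $ip=192.168.1.2, $p=47763/tcp,
	                $interface="eth0", $manager="manager", $proxy="proxy-1"],
};

# In any other loaded script (not in cluster-layout.bro itself), the node's
# role can be inspected at runtime via the functions declared further below:
event bro_init()
	{
	if ( Cluster::is_enabled() )
		print fmt("running as %s", Cluster::local_node_type());
	}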
@load base/frameworks/control
module Cluster;
export {
## The cluster logging stream identifier.
redef enum Log::ID += { LOG };
## The record type which contains the column fields of the cluster log.
type Info: record {
## The time at which a cluster message was generated.
ts: time;
## A message indicating information about the cluster's operation.
message: string;
} &log;
## Types of nodes that are allowed to participate in the cluster
## configuration.
type NodeType: enum {
## A dummy node type indicating the local node is not operating
## within a cluster.
NONE,
## A node type which is allowed to view/manipulate the configuration
## of other nodes in the cluster.
CONTROL,
## A node type responsible for log and policy management.
MANAGER,
## A node type for relaying worker node communication and synchronizing
## worker node state.
PROXY,
## The node type doing all the actual traffic analysis.
WORKER,
## A node acting as a traffic recorder using the
## `Time Machine <http://tracker.bro-ids.org/time-machine>`_ software.
TIME_MACHINE,
};
@ -49,30 +73,41 @@ export {
## Record type to indicate a node in a cluster.
type Node: record {
## Identifies the type of cluster node in this node's configuration.
node_type: NodeType;
## The IP address of the cluster node.
ip: addr;
## If the *ip* field is a non-global IPv6 address, this field
## can specify a particular :rfc:`4007` ``zone_id``.
zone_id: string &default="";
## The port to which this local node can connect when
## establishing communication.
p: port;
## Identifier for the interface a worker is sniffing.
interface: string &optional;
## Manager node this node uses. For workers and proxies.
## Name of the manager node this node uses. For workers and proxies.
manager: string &optional;
## Proxy node this node uses. For workers and managers.
## Name of the proxy node this node uses. For workers and managers.
proxy: string &optional;
## Worker nodes that this node connects with. For managers and proxies.
## Names of worker nodes that this node connects with.
## For managers and proxies.
workers: set[string] &optional;
## Name of a time machine node with which this node connects.
time_machine: string &optional;
};
## This function can be called at any time to determine if the cluster
## framework is being enabled for this run.
##
## Returns: True if :bro:id:`Cluster::node` has been set.
global is_enabled: function(): bool;
## This function can be called at any time to determine what type of
## cluster node the current Bro instance is going to be acting as.
## If :bro:id:`Cluster::is_enabled` returns false, then
## :bro:enum:`Cluster::NONE` is returned.
##
## Returns: The :bro:type:`Cluster::NodeType` the calling node acts as.
global local_node_type: function(): NodeType;
## This gives the value for the number of workers currently connected to,

View file

@ -1,3 +1,7 @@
##! Redefines the options common to all proxy nodes within a Bro cluster.
##! In particular, proxies are not meant to produce logs locally and they
##! do not forward events anywhere; they mainly synchronize state between
##! worker nodes.
@prefixes += cluster-proxy

View file

@ -1,3 +1,7 @@
##! Redefines some options common to all worker nodes within a Bro cluster.
##! In particular, worker nodes do not produce logs locally, instead they
##! send them off to a manager node for processing.
@prefixes += cluster-worker
## Don't do any local logging.

View file

@ -1,3 +1,6 @@
##! This script establishes communication among all nodes in a cluster
##! as defined by :bro:id:`Cluster::nodes`.
@load ./main
@load base/frameworks/communication
@ -16,23 +19,26 @@ event bro_init() &priority=9
# Connections from the control node for runtime control and update events.
# Every node in a cluster is eligible for control from this host.
if ( n$node_type == CONTROL )
Communication::nodes["control"] = [$host=n$ip, $connect=F,
$class="control", $events=control_events];
Communication::nodes["control"] = [$host=n$ip, $zone_id=n$zone_id,
$connect=F, $class="control",
$events=control_events];
if ( me$node_type == MANAGER )
{
if ( n$node_type == WORKER && n$manager == node )
Communication::nodes[i] =
[$host=n$ip, $connect=F,
[$host=n$ip, $zone_id=n$zone_id, $connect=F,
$class=i, $events=worker2manager_events, $request_logs=T];
if ( n$node_type == PROXY && n$manager == node )
Communication::nodes[i] =
[$host=n$ip, $connect=F,
[$host=n$ip, $zone_id=n$zone_id, $connect=F,
$class=i, $events=proxy2manager_events, $request_logs=T];
if ( n$node_type == TIME_MACHINE && me?$time_machine && me$time_machine == i )
Communication::nodes["time-machine"] = [$host=nodes[i]$ip, $p=nodes[i]$p,
Communication::nodes["time-machine"] = [$host=nodes[i]$ip,
$zone_id=nodes[i]$zone_id,
$p=nodes[i]$p,
$connect=T, $retry=1min,
$events=tm2manager_events];
}
@ -41,7 +47,8 @@ event bro_init() &priority=9
{
if ( n$node_type == WORKER && n$proxy == node )
Communication::nodes[i] =
[$host=n$ip, $connect=F, $class=i, $events=worker2proxy_events];
[$host=n$ip, $zone_id=n$zone_id, $connect=F, $class=i,
$sync=T, $auth=T, $events=worker2proxy_events];
# accepts connections from the previous one.
# (This is not ideal for setups with many proxies)
@ -50,16 +57,18 @@ event bro_init() &priority=9
{
if ( n?$proxy )
Communication::nodes[i]
= [$host=n$ip, $p=n$p,
= [$host=n$ip, $zone_id=n$zone_id, $p=n$p,
$connect=T, $auth=F, $sync=T, $retry=1mins];
else if ( me?$proxy && me$proxy == i )
Communication::nodes[me$proxy]
= [$host=nodes[i]$ip, $connect=F, $auth=T, $sync=T];
= [$host=nodes[i]$ip, $zone_id=nodes[i]$zone_id,
$connect=F, $auth=T, $sync=T];
}
# Finally the manager, to send it status updates.
if ( n$node_type == MANAGER && me$manager == i )
Communication::nodes["manager"] = [$host=nodes[i]$ip,
$zone_id=nodes[i]$zone_id,
$p=nodes[i]$p,
$connect=T, $retry=1mins,
$class=node,
@ -69,6 +78,7 @@ event bro_init() &priority=9
{
if ( n$node_type == MANAGER && me$manager == i )
Communication::nodes["manager"] = [$host=nodes[i]$ip,
$zone_id=nodes[i]$zone_id,
$p=nodes[i]$p,
$connect=T, $retry=1mins,
$class=node,
@ -76,6 +86,7 @@ event bro_init() &priority=9
if ( n$node_type == PROXY && me$proxy == i )
Communication::nodes["proxy"] = [$host=nodes[i]$ip,
$zone_id=nodes[i]$zone_id,
$p=nodes[i]$p,
$connect=T, $retry=1mins,
$sync=T, $class=node,
@ -84,6 +95,7 @@ event bro_init() &priority=9
if ( n$node_type == TIME_MACHINE &&
me?$time_machine && me$time_machine == i )
Communication::nodes["time-machine"] = [$host=nodes[i]$ip,
$zone_id=nodes[i]$zone_id,
$p=nodes[i]$p,
$connect=T,
$retry=1min,

View file

@ -1,34 +1,62 @@
##! Connect to remote Bro or Broccoli instances to share state and/or transfer
##! events.
##! Facilitates connecting to remote Bro or Broccoli instances to share state
##! and/or transfer events.
@load base/frameworks/packet-filter
@load base/utils/addrs
module Communication;
export {
## The communication logging stream identifier.
redef enum Log::ID += { LOG };
## Which interface to listen on (0.0.0.0 for any interface).
## Which interface to listen on. The addresses ``0.0.0.0`` and ``[::]``
## are wildcards.
const listen_interface = 0.0.0.0 &redef;
## Which port to listen on.
const listen_port = 47757/tcp &redef;
## This defines if a listening socket should use SSL.
const listen_ssl = F &redef;
## Default compression level. Compression level is 0-9, with 0 = no
## Defines if a listening socket can bind to IPv6 addresses.
const listen_ipv6 = F &redef;
## If :bro:id:`Communication::listen_interface` is a non-global
## IPv6 address and requires a specific :rfc:`4007` ``zone_id``,
## it can be specified here.
const listen_ipv6_zone_id = "" &redef;
## Defines the interval at which to retry binding to
## :bro:id:`Communication::listen_interface` on
## :bro:id:`Communication::listen_port` if it's already in use.
const listen_retry = 30 secs &redef;
## Default compression level. Compression level is 0-9, with 0 = no
## compression.
global compression_level = 0 &redef;
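As a hedged sketch of the listening options above (the interface address and zone are hypothetical):
# Listen on a link-local IPv6 address; the zone_id selects the interface.
redef Communication::listen_ipv6 = T;
redef Communication::listen_interface = [fe80::1];
redef Communication::listen_ipv6_zone_id = "eth0";
# Retry the bind more aggressively than the 30-second default.
redef Communication::listen_retry = 10 secs;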
## A record type containing the column fields of the communication log.
type Info: record {
## The network time at which a communication event occurred.
ts: time &log;
## The peer name (if any) with which a communication event is concerned.
peer: string &log &optional;
## Where the communication event message originated from, that is,
## either from the scripting layer or inside the Bro process.
src_name: string &log &optional;
## .. todo:: currently unused.
connected_peer_desc: string &log &optional;
## .. todo:: currently unused.
connected_peer_addr: addr &log &optional;
## .. todo:: currently unused.
connected_peer_port: port &log &optional;
## The severity of the communication event message.
level: string &log &optional;
## A message describing the communication event between Bro or
## Broccoli instances.
message: string &log;
};
@ -38,7 +66,11 @@ export {
type Node: record {
## Remote address.
host: addr;
## If the *host* field is a non-global IPv6 address, this field
## can specify a particular :rfc:`4007` ``zone_id``.
zone_id: string &optional;
## Port of the remote Bro communication endpoint if we are initiating
## the connection based on the :bro:id:`connect` field.
p: port &optional;
@ -77,7 +109,7 @@ export {
auth: bool &default = F;
## If not set, no capture filter is sent.
## If set to "", the default cature filter is sent.
## If set to "", the default capture filter is sent.
capture_filter: string &optional;
## Whether to use SSL-based communication.
@ -88,7 +120,7 @@ export {
## The remote peer.
peer: event_peer &optional;
## Indicates the status of the node.
connected: bool &default = F;
};
@ -96,11 +128,25 @@ export {
## The table of Bro or Broccoli nodes that Bro will initiate connections
## to or respond to connections from.
global nodes: table[string] of Node &redef;
## A table of peer nodes for which this node issued a
## :bro:id:`Communication::connect_peer` call but with which a connection
## has not yet been established, or with which a connection was closed and
## is currently being retried. When a connection is successfully
## established, the peer is removed from the table.
global pending_peers: table[peer_id] of Node;
## A table of peer nodes for which this node has an established connection.
## Peers are automatically removed if their connection is closed and
## automatically added back if a connection is re-established later.
global connected_peers: table[peer_id] of Node;
## Connect to nodes[node], independent of its "connect" flag.
## Connect to a node in :bro:id:`Communication::nodes` independent
## of its "connect" flag.
##
## peer: the string used to index a particular node within the
## :bro:id:`Communication::nodes` table.
global connect_peer: function(peer: string);
}
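To tie the pieces together, here is a hedged sketch (the peer name, address, and zone are hypothetical) that registers a peer with an :rfc:`4007` ``zone_id`` and then connects to it explicitly via ``connect_peer``:
# Hypothetical peer on a link-local IPv6 address, reached via interface eth0.
redef Communication::nodes += {
	["collector"] = [$host=[fe80::2], $zone_id="eth0", $p=47757/tcp,
	                 $connect=F, $retry=1min],
};

# Run after the framework's own bro_init (priority -10) has already called
# enable_communication(), then connect regardless of the entry's $connect flag.
event bro_init() &priority=-15
	{
	Communication::connect_peer("collector");
	}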
@ -117,7 +163,7 @@ event bro_init() &priority=5
function do_script_log_common(level: count, src: count, msg: string)
{
Log::write(Communication::LOG, [$ts = network_time(),
Log::write(Communication::LOG, [$ts = network_time(),
$level = (level == REMOTE_LOG_INFO ? "info" : "error"),
$src_name = src_names[src],
$peer = get_event_peer()$descr,
@ -130,6 +176,13 @@ event remote_log(level: count, src: count, msg: string)
do_script_log_common(level, src, msg);
}
# This is a core generated event.
event remote_log_peer(p: event_peer, level: count, src: count, msg: string)
{
local rmsg = fmt("[#%d/%s:%d] %s", p$id, addr_to_uri(p$host), p$p, msg);
do_script_log_common(level, src, rmsg);
}
function do_script_log(p: event_peer, msg: string)
{
do_script_log_common(REMOTE_LOG_INFO, REMOTE_SRC_SCRIPT, msg);
@ -144,10 +197,11 @@ function connect_peer(peer: string)
p = node$p;
local class = node?$class ? node$class : "";
local id = connect(node$host, p, class, node$retry, node$ssl);
local zone_id = node?$zone_id ? node$zone_id : "";
local id = connect(node$host, zone_id, p, class, node$retry, node$ssl);
if ( id == PEER_ID_NONE )
Log::write(Communication::LOG, [$ts = network_time(),
Log::write(Communication::LOG, [$ts = network_time(),
$peer = get_event_peer()$descr,
$message = "can't trigger connect"]);
pending_peers[id] = node;
@ -286,7 +340,7 @@ event bro_init() &priority = -10 # let others modify nodes
{
if ( |nodes| > 0 )
enable_communication();
for ( tag in nodes )
{
if ( ! nodes[tag]$connect )

View file

@ -1,43 +1,34 @@
##! This is a utility script that sends the current values of all &redef'able
##! consts to a remote Bro then sends the :bro:id:`configuration_update` event
##! and terminates processing.
##!
##! Intended to be used from the command line like this when starting a controller::
##!
##! bro <scripts> frameworks/control/controller Control::host=<host_addr> Control::port=<host_port> Control::cmd=<command> [Control::arg=<arg>]
##!
##! A controllee only needs to load the controllee script in addition
##! to the specific analysis scripts desired. It may also need a node
##! configured as a controller node in the communications nodes configuration::
##!
##! bro <scripts> frameworks/control/controllee
##!
##! To use the framework as a controllee, it only needs to be loaded and
##! the controlled node need to accept all events in the "Control::" namespace
##! from the host where the control actions will be performed from along with
##! using the "control" class.
##! The control framework provides the foundation for "commands" that can
##! be issued remotely at runtime to modify a running Bro instance or to
##! collect information from it.
module Control;
export {
## This is the address of the host that will be controlled.
## The address of the host that will be controlled.
const host = 0.0.0.0 &redef;
## This is the port of the host that will be controlled.
## The port of the host that will be controlled.
const host_port = 0/tcp &redef;
## This is the command that is being done. It's typically set on the
## command line and influences whether this instance starts up as a
## controller or controllee.
## If :bro:id:`Control::host` is a non-global IPv6 address and
## requires a specific :rfc:`4007` ``zone_id``, it can be set here.
const zone_id = "" &redef;
## The command to be executed. It's typically set on the
## command line.
const cmd = "" &redef;
## This can be used by commands that take an argument.
const arg = "" &redef;
## Events that need to be handled by controllers.
const controller_events = /Control::.*_request/ &redef;
## Events that need to be handled by controllees.
const controllee_events = /Control::.*_response/ &redef;
## These are the commands that can be given on the command line for
## The commands that can currently be given on the command line for
## remote control.
const commands: set[string] = {
"id_value",
@ -45,15 +36,15 @@ export {
"net_stats",
"configuration_update",
"shutdown",
};
} &redef;
## Variable IDs that are to be ignored by the update process.
const ignore_ids: set[string] = {
};
const ignore_ids: set[string] = { };
## Event for requesting the value of an ID (a variable).
global id_value_request: event(id: string);
## Event for returning the value of an ID after an :bro:id:`id_request` event.
## Event for returning the value of an ID after an
## :bro:id:`Control::id_value_request` event.
global id_value_response: event(id: string, val: string);
## Requests the current communication status.
@ -68,7 +59,8 @@ export {
## Inform the remote Bro instance that its configuration may have been updated.
global configuration_update_request: event();
## This event is a wrapper and alias for the :bro:id:`configuration_update_request` event.
## This event is a wrapper and alias for the
## :bro:id:`Control::configuration_update_request` event.
## This event is also a primary hooking point for the control framework.
global configuration_update: event();
## Message in response to a configuration update request.

View file

@ -80,15 +80,15 @@ signature irc_server_reply {
tcp-state responder
}
signature irc_sig3 {
signature irc_server_to_server1 {
ip-proto == tcp
payload /(.*\x0a)*(\x20)*[Ss][Ee][Rr][Vv][Ee][Rr](\x20)+.+\x0a/
payload /(|.*[\r\n]) *[Ss][Ee][Rr][Vv][Ee][Rr] +[^ ]+ +[0-9]+ +:.+[\r\n]/
}
signature irc_sig4 {
signature irc_server_to_server2 {
ip-proto == tcp
payload /(.*\x0a)*(\x20)*[Ss][Ee][Rr][Vv][Ee][Rr](\x20)+.+\x0a/
requires-reverse-signature irc_sig3
payload /(|.*[\r\n]) *[Ss][Ee][Rr][Vv][Ee][Rr] +[^ ]+ +[0-9]+ +:.+[\r\n]/
requires-reverse-signature irc_server_to_server1
enable "irc"
}
@ -149,3 +149,64 @@ signature dpd_ssl_client {
payload /^(\x16\x03[\x00\x01\x02]..\x01...\x03[\x00\x01\x02]|...?\x01[\x00\x01\x02][\x02\x03]).*/
tcp-state originator
}
signature dpd_ayiya {
ip-proto = udp
payload /^..\x11\x29/
enable "ayiya"
}
signature dpd_teredo {
ip-proto = udp
payload /^(\x00\x00)|(\x00\x01)|([\x60-\x6f])/
enable "teredo"
}
signature dpd_socks4_client {
ip-proto == tcp
# '32' is a rather arbitrary max length for the user name.
payload /^\x04[\x01\x02].{0,32}\x00/
tcp-state originator
}
signature dpd_socks4_server {
ip-proto == tcp
requires-reverse-signature dpd_socks4_client
payload /^\x00[\x5a\x5b\x5c\x5d]/
tcp-state responder
enable "socks"
}
signature dpd_socks4_reverse_client {
ip-proto == tcp
# '32' is a rather arbitrary max length for the user name.
payload /^\x04[\x01\x02].{0,32}\x00/
tcp-state responder
}
signature dpd_socks4_reverse_server {
ip-proto == tcp
requires-reverse-signature dpd_socks4_reverse_client
payload /^\x00[\x5a\x5b\x5c\x5d]/
tcp-state originator
enable "socks"
}
signature dpd_socks5_client {
ip-proto == tcp
# Watch for a few authentication methods to reduce false positives.
payload /^\x05.[\x00\x01\x02]/
tcp-state originator
}
signature dpd_socks5_server {
ip-proto == tcp
requires-reverse-signature dpd_socks5_client
# Watch for a single authentication method to be chosen by the server or
# for the server to indicate that no authentication is required.
payload /^\x05(\x00|\x01[\x00\x01\x02])/
tcp-state responder
enable "socks"
}

View file

@ -3,18 +3,19 @@
module DPD;
## Add the DPD signatures to the signature framework.
redef signature_files += "base/frameworks/dpd/dpd.sig";
@load-sigs ./dpd.sig
export {
## Add the DPD logging stream identifier.
redef enum Log::ID += { LOG };
## The record type defining the columns to log in the DPD logging stream.
type Info: record {
## Timestamp for when protocol analysis failed.
ts: time &log;
## Connection unique ID.
uid: string &log;
## Connection ID.
## Connection ID containing the 4-tuple which identifies endpoints.
id: conn_id &log;
## Transport protocol for the violation.
proto: transport_proto &log;
@ -103,5 +104,8 @@ event protocol_violation(c: connection, atype: count, aid: count,
reason: string) &priority=-5
{
if ( c?$dpd )
{
Log::write(DPD::LOG, c$dpd);
delete c$dpd;
}
}
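The ``@load-sigs`` directive used above replaces the old ``redef signature_files`` idiom; a site-local script can pull in extra signatures the same way. A tiny sketch (the file name is hypothetical):
# Load additional signatures that live next to the loading script.
@load-sigs ./my-site.sig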

View file

@ -0,0 +1,5 @@
@load ./main
@load ./readers/ascii
@load ./readers/raw
@load ./readers/benchmark

View file

@ -0,0 +1,158 @@
##! The input framework provides a way to read previously stored data either
##! as an event stream or into a Bro table.
module Input;
export {
## The default input reader used. Defaults to `READER_ASCII`.
const default_reader = READER_ASCII &redef;
## The default reader mode used. Defaults to `MANUAL`.
const default_mode = MANUAL &redef;
## Flag that controls whether the input framework accepts records
## that contain types that are not supported (at the moment,
## file and function). If true, the input framework will
## warn in these cases, but continue. If false, it will
## abort. Defaults to false (abort).
const accept_unsupported_types = F &redef;
## Description type used for the `add_table` method.
type TableDescription: record {
## Common definitions for tables and events
## String that allows the reader to find the source.
## For `READER_ASCII`, this is the filename.
source: string;
## Reader to use for this stream
reader: Reader &default=default_reader;
## Read mode to use for this stream
mode: Mode &default=default_mode;
## Descriptive name. Used to remove a stream at a later time
name: string;
# Special definitions for tables
## Table which will receive the data read by the input framework
destination: any;
## Record that defines the values used as the index of the table
idx: any;
## Record that defines the values used as the elements of the table
## If val is undefined, destination has to be a set.
val: any &optional;
## Defines whether the value of the table is a record (default) or a single value. Val
## can only contain one element when this is set to false.
want_record: bool &default=T;
## The event that is raised each time a value is added to, changed in or removed
## from the table. The event will receive an Input::Event enum as the first
## argument, the idx record as the second argument and the value (record) as the
## third argument.
ev: any &optional; # event containing idx, val as values.
## Predicate function that can decide if an insertion, update or removal should
## really be executed. Parameters are the same as for the event. If true is
## returned, the update is performed. If false is returned, it is skipped.
pred: function(typ: Input::Event, left: any, right: any): bool &optional;
## A key/value table that will be passed on to the reader.
## Interpretation of the values is left to the reader, but
## usually they will be used for configuration purposes.
config: table[string] of string &default=table();
};
## Description type used for the `add_event` method.
type EventDescription: record {
## Common definitions for tables and events
## String that allows the reader to find the source.
## For `READER_ASCII`, this is the filename.
source: string;
## Reader to use for this stream
reader: Reader &default=default_reader;
## Read mode to use for this stream
mode: Mode &default=default_mode;
## Descriptive name. Used to remove a stream at a later time
name: string;
# Special definitions for events
## Record describing the fields to be retrieved from the source input.
fields: any;
## If want_record is false, the event receives each value in fields as a separate argument.
## If it is set to true (default), the event receives all fields in a single record value.
want_record: bool &default=T;
## The event that is raised each time a new line is received from the reader.
## The event will receive an Input::Event enum as the first element, and the fields as the following arguments.
ev: any;
## A key/value table that will be passed on to the reader.
## Interpretation of the values is left to the reader, but
## usually they will be used for configuration purposes.
config: table[string] of string &default=table();
};
## Create a new table input from a given source. Returns true on success.
##
## description: `TableDescription` record describing the source.
global add_table: function(description: Input::TableDescription) : bool;
## Create a new event input from a given source. Returns true on success.
##
## description: `EventDescription` record describing the source.
global add_event: function(description: Input::EventDescription) : bool;
## Remove an input stream. Returns true on success and false if the named stream was
## not found.
##
## id: string value identifying the stream to be removed
global remove: function(id: string) : bool;
## Forces the current input to be checked for changes.
## Returns true on success and false if the named stream was not found.
##
## id: string value identifying the stream
global force_update: function(id: string) : bool;
## Event that is called when the end of a data source has been reached, including
## after an update.
global end_of_data: event(name: string, source: string);
}
@load base/input.bif
module Input;
function add_table(description: Input::TableDescription) : bool
{
return __create_table_stream(description);
}
function add_event(description: Input::EventDescription) : bool
{
return __create_event_stream(description);
}
function remove(id: string) : bool
{
return __remove_stream(id);
}
function force_update(id: string) : bool
{
return __force_update(id);
}
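To make the table interface concrete, here is a hedged sketch (the module, file name, record types, and table are all hypothetical) that reads a tab-separated file of flagged addresses into a table and prints it once the source has been fully read:
# Expects a tab-separated input file along the lines of (columns separated
# by tabs):
#
#   #fields  ip            reason
#   192.168.1.10           scanner
#
module SiteBlacklist;

type Idx: record {
	ip: addr;
};

type Val: record {
	reason: string;
};

global blacklist: table[addr] of Val = table();

event Input::end_of_data(name: string, source: string)
	{
	if ( name != "blacklist" )
		return;

	# The table is only guaranteed to be complete once end_of_data fires.
	print blacklist;
	Input::remove("blacklist");
	}

event bro_init()
	{
	Input::add_table([$source="blacklist.file", $name="blacklist",
	                  $idx=Idx, $val=Val, $destination=blacklist]);
	}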

View file

@ -0,0 +1,21 @@
##! Interface for the ascii input reader.
##!
##! The defaults are set to match Bro's ASCII output.
module InputAscii;
export {
## Separator between fields.
## Please note that the separator has to be exactly one character long
const separator = "\t" &redef;
## Separator between set elements.
## Please note that the separator has to be exactly one character long
const set_separator = "," &redef;
## String to use for empty fields.
const empty_field = "(empty)" &redef;
## String to use for an unset &optional field.
const unset_field = "-" &redef;
}
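Since these are ``&redef`` constants, a site whose input files are comma-separated could adjust the reader globally; a small sketch:
# Applies to every ASCII input stream in this Bro instance.
redef InputAscii::separator = ",";
redef InputAscii::empty_field = "EMPTY";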

View file

@ -0,0 +1,23 @@
##! Interface for the ascii input reader.
module InputBenchmark;
export {
## Multiplication factor for each second.
const factor = 1.0 &redef;
## Spread factor between lines.
const spread = 0 &redef;
## Spreading where usleep = 1000000 / autospread * num_lines.
const autospread = 0.0 &redef;
## Addition factor for each heartbeat.
const addfactor = 0 &redef;
## Stop spreading at x lines per heartbeat.
const stopspreadat = 0 &redef;
## 1 -> enable timed spreading.
const timedspread = 0.0 &redef;
}

Some files were not shown because too many files have changed in this diff.