Merge remote-tracking branch 'origin/master' into topic/seth/metrics-updates

Conflicts:
	testing/btest/Baseline/scripts.base.frameworks.metrics.basic-cluster/manager-1.metrics.log
	testing/btest/Baseline/scripts.base.frameworks.metrics.basic/metrics.log
	testing/btest/Baseline/scripts.base.frameworks.metrics.cluster-intermediate-update/manager-1.notice.log
	testing/btest/Baseline/scripts.base.frameworks.metrics.notice/notice.log
	testing/btest/scripts/base/frameworks/metrics/basic-cluster.bro
Seth Hall 2012-10-01 16:23:06 -04:00
commit 6750b0f7b9
911 changed files with 36856 additions and 5211 deletions

CHANGES (1086 changed lines)

File diff suppressed because it is too large.

CMakeLists.txt

@@ -88,16 +88,64 @@ if (LIBGEOIP_FOUND)
     list(APPEND OPTLIBS ${LibGeoIP_LIBRARY})
 endif ()
 
-set(USE_PERFTOOLS false)
-if (ENABLE_PERFTOOLS)
-    find_package(GooglePerftools)
-    if (GOOGLEPERFTOOLS_FOUND)
-        set(USE_PERFTOOLS true)
-        include_directories(BEFORE ${GooglePerftools_INCLUDE_DIR})
-        list(APPEND OPTLIBS ${GooglePerftools_LIBRARIES})
-    endif ()
+set(HAVE_PERFTOOLS false)
+set(USE_PERFTOOLS_DEBUG false)
+set(USE_PERFTOOLS_TCMALLOC false)
+
+if (NOT DISABLE_PERFTOOLS)
+    find_package(GooglePerftools)
+endif ()
+
+if (GOOGLEPERFTOOLS_FOUND)
+    set(HAVE_PERFTOOLS true)
+    # Non-Linux systems may not be well-supported by gperftools, so
+    # require explicit request from user to enable it in that case.
+    if (${CMAKE_SYSTEM_NAME} MATCHES "Linux" OR ENABLE_PERFTOOLS)
+        set(USE_PERFTOOLS_TCMALLOC true)
+
+        if (ENABLE_PERFTOOLS_DEBUG)
+            # Enable heap debugging with perftools.
+            set(USE_PERFTOOLS_DEBUG true)
+            include_directories(BEFORE ${GooglePerftools_INCLUDE_DIR})
+            list(APPEND OPTLIBS ${GooglePerftools_LIBRARIES_DEBUG})
+        else ()
+            # Link in tcmalloc for better performance.
+            list(APPEND OPTLIBS ${GooglePerftools_LIBRARIES})
+        endif ()
+    endif ()
+endif ()
+
+set(USE_DATASERIES false)
+find_package(Lintel)
+find_package(DataSeries)
+find_package(LibXML2)
+
+if (LINTEL_FOUND AND DATASERIES_FOUND AND LIBXML2_FOUND)
+    set(USE_DATASERIES true)
+    include_directories(BEFORE ${Lintel_INCLUDE_DIR})
+    include_directories(BEFORE ${DataSeries_INCLUDE_DIR})
+    include_directories(BEFORE ${LibXML2_INCLUDE_DIR})
+    list(APPEND OPTLIBS ${Lintel_LIBRARIES})
+    list(APPEND OPTLIBS ${DataSeries_LIBRARIES})
+    list(APPEND OPTLIBS ${LibXML2_LIBRARIES})
+endif()
+
+set(USE_ELASTICSEARCH false)
+set(USE_CURL false)
+find_package(CURL)
+
+if (CURL_FOUND)
+    set(USE_ELASTICSEARCH true)
+    set(USE_CURL true)
+    include_directories(BEFORE ${CURL_INCLUDE_DIR})
+    list(APPEND OPTLIBS ${CURL_LIBRARIES})
+endif()
+
+if (ENABLE_PERFTOOLS_DEBUG)
+    # Just a no op to prevent CMake from complaining about manually-specified
+    # ENABLE_PERFTOOLS_DEBUG not being used if google perftools weren't found
 endif ()
 
 set(brodeps
     ${BinPAC_LIBRARY}
     ${PCAP_LIBRARY}
@@ -182,7 +230,13 @@ message(
     "\nAux. Tools:        ${INSTALL_AUX_TOOLS}"
     "\n"
     "\nGeoIP:             ${USE_GEOIP}"
-    "\nGoogle perftools:  ${USE_PERFTOOLS}"
+    "\ngperftools found:  ${HAVE_PERFTOOLS}"
+    "\n        tcmalloc:  ${USE_PERFTOOLS_TCMALLOC}"
+    "\n       debugging:  ${USE_PERFTOOLS_DEBUG}"
+    "\ncURL:              ${USE_CURL}"
+    "\n"
+    "\nDataSeries:        ${USE_DATASERIES}"
+    "\nElasticSearch:     ${USE_ELASTICSEARCH}"
     "\n"
     "\n================================================================\n"
 )

DocSourcesList.cmake (new file, 144 lines)

@@ -0,0 +1,144 @@
# DO NOT EDIT
# This file is auto-generated from the genDocSourcesList.sh script.
#
# This is a list of Bro script sources for which to generate reST documentation.
# It will be included inline in the CMakeLists.txt found in the same directory
# in order to create Makefile targets that define how to generate reST from
# a given Bro script.
#
# Note: any path prefix of the script (2nd argument of rest_target macro)
# will be used to derive what path under scripts/ the generated documentation
# will be placed.
set(psd ${PROJECT_SOURCE_DIR}/scripts)
rest_target(${CMAKE_CURRENT_SOURCE_DIR} example.bro internal)
rest_target(${psd} base/init-default.bro internal)
rest_target(${psd} base/init-bare.bro internal)
rest_target(${CMAKE_BINARY_DIR}/src base/bro.bif.bro)
rest_target(${CMAKE_BINARY_DIR}/src base/const.bif.bro)
rest_target(${CMAKE_BINARY_DIR}/src base/event.bif.bro)
rest_target(${CMAKE_BINARY_DIR}/src base/logging.bif.bro)
rest_target(${CMAKE_BINARY_DIR}/src base/reporter.bif.bro)
rest_target(${CMAKE_BINARY_DIR}/src base/strings.bif.bro)
rest_target(${CMAKE_BINARY_DIR}/src base/types.bif.bro)
rest_target(${psd} base/frameworks/cluster/main.bro)
rest_target(${psd} base/frameworks/cluster/nodes/manager.bro)
rest_target(${psd} base/frameworks/cluster/nodes/proxy.bro)
rest_target(${psd} base/frameworks/cluster/nodes/worker.bro)
rest_target(${psd} base/frameworks/cluster/setup-connections.bro)
rest_target(${psd} base/frameworks/communication/main.bro)
rest_target(${psd} base/frameworks/control/main.bro)
rest_target(${psd} base/frameworks/dpd/main.bro)
rest_target(${psd} base/frameworks/intel/main.bro)
rest_target(${psd} base/frameworks/logging/main.bro)
rest_target(${psd} base/frameworks/logging/postprocessors/scp.bro)
rest_target(${psd} base/frameworks/logging/postprocessors/sftp.bro)
rest_target(${psd} base/frameworks/logging/writers/ascii.bro)
rest_target(${psd} base/frameworks/logging/writers/dataseries.bro)
rest_target(${psd} base/frameworks/metrics/cluster.bro)
rest_target(${psd} base/frameworks/metrics/main.bro)
rest_target(${psd} base/frameworks/metrics/non-cluster.bro)
rest_target(${psd} base/frameworks/notice/actions/add-geodata.bro)
rest_target(${psd} base/frameworks/notice/actions/drop.bro)
rest_target(${psd} base/frameworks/notice/actions/email_admin.bro)
rest_target(${psd} base/frameworks/notice/actions/page.bro)
rest_target(${psd} base/frameworks/notice/actions/pp-alarms.bro)
rest_target(${psd} base/frameworks/notice/cluster.bro)
rest_target(${psd} base/frameworks/notice/extend-email/hostnames.bro)
rest_target(${psd} base/frameworks/notice/main.bro)
rest_target(${psd} base/frameworks/notice/weird.bro)
rest_target(${psd} base/frameworks/packet-filter/main.bro)
rest_target(${psd} base/frameworks/packet-filter/netstats.bro)
rest_target(${psd} base/frameworks/reporter/main.bro)
rest_target(${psd} base/frameworks/signatures/main.bro)
rest_target(${psd} base/frameworks/software/main.bro)
rest_target(${psd} base/protocols/conn/contents.bro)
rest_target(${psd} base/protocols/conn/inactivity.bro)
rest_target(${psd} base/protocols/conn/main.bro)
rest_target(${psd} base/protocols/dns/consts.bro)
rest_target(${psd} base/protocols/dns/main.bro)
rest_target(${psd} base/protocols/ftp/file-extract.bro)
rest_target(${psd} base/protocols/ftp/main.bro)
rest_target(${psd} base/protocols/ftp/utils-commands.bro)
rest_target(${psd} base/protocols/http/file-extract.bro)
rest_target(${psd} base/protocols/http/file-hash.bro)
rest_target(${psd} base/protocols/http/file-ident.bro)
rest_target(${psd} base/protocols/http/main.bro)
rest_target(${psd} base/protocols/http/utils.bro)
rest_target(${psd} base/protocols/irc/dcc-send.bro)
rest_target(${psd} base/protocols/irc/main.bro)
rest_target(${psd} base/protocols/smtp/entities-excerpt.bro)
rest_target(${psd} base/protocols/smtp/entities.bro)
rest_target(${psd} base/protocols/smtp/main.bro)
rest_target(${psd} base/protocols/ssh/main.bro)
rest_target(${psd} base/protocols/ssl/consts.bro)
rest_target(${psd} base/protocols/ssl/main.bro)
rest_target(${psd} base/protocols/ssl/mozilla-ca-list.bro)
rest_target(${psd} base/protocols/syslog/consts.bro)
rest_target(${psd} base/protocols/syslog/main.bro)
rest_target(${psd} base/utils/addrs.bro)
rest_target(${psd} base/utils/conn-ids.bro)
rest_target(${psd} base/utils/directions-and-hosts.bro)
rest_target(${psd} base/utils/files.bro)
rest_target(${psd} base/utils/numbers.bro)
rest_target(${psd} base/utils/paths.bro)
rest_target(${psd} base/utils/patterns.bro)
rest_target(${psd} base/utils/site.bro)
rest_target(${psd} base/utils/strings.bro)
rest_target(${psd} base/utils/thresholds.bro)
rest_target(${psd} policy/frameworks/communication/listen.bro)
rest_target(${psd} policy/frameworks/control/controllee.bro)
rest_target(${psd} policy/frameworks/control/controller.bro)
rest_target(${psd} policy/frameworks/dpd/detect-protocols.bro)
rest_target(${psd} policy/frameworks/dpd/packet-segment-logging.bro)
rest_target(${psd} policy/frameworks/metrics/conn-example.bro)
rest_target(${psd} policy/frameworks/metrics/http-example.bro)
rest_target(${psd} policy/frameworks/metrics/ssl-example.bro)
rest_target(${psd} policy/frameworks/software/version-changes.bro)
rest_target(${psd} policy/frameworks/software/vulnerable.bro)
rest_target(${psd} policy/integration/barnyard2/main.bro)
rest_target(${psd} policy/integration/barnyard2/types.bro)
rest_target(${psd} policy/misc/analysis-groups.bro)
rest_target(${psd} policy/misc/capture-loss.bro)
rest_target(${psd} policy/misc/loaded-scripts.bro)
rest_target(${psd} policy/misc/profiling.bro)
rest_target(${psd} policy/misc/stats.bro)
rest_target(${psd} policy/misc/trim-trace-file.bro)
rest_target(${psd} policy/protocols/conn/known-hosts.bro)
rest_target(${psd} policy/protocols/conn/known-services.bro)
rest_target(${psd} policy/protocols/conn/weirds.bro)
rest_target(${psd} policy/protocols/dns/auth-addl.bro)
rest_target(${psd} policy/protocols/dns/detect-external-names.bro)
rest_target(${psd} policy/protocols/ftp/detect.bro)
rest_target(${psd} policy/protocols/ftp/software.bro)
rest_target(${psd} policy/protocols/http/detect-MHR.bro)
rest_target(${psd} policy/protocols/http/detect-intel.bro)
rest_target(${psd} policy/protocols/http/detect-sqli.bro)
rest_target(${psd} policy/protocols/http/detect-webapps.bro)
rest_target(${psd} policy/protocols/http/header-names.bro)
rest_target(${psd} policy/protocols/http/software-browser-plugins.bro)
rest_target(${psd} policy/protocols/http/software.bro)
rest_target(${psd} policy/protocols/http/var-extraction-cookies.bro)
rest_target(${psd} policy/protocols/http/var-extraction-uri.bro)
rest_target(${psd} policy/protocols/smtp/blocklists.bro)
rest_target(${psd} policy/protocols/smtp/detect-suspicious-orig.bro)
rest_target(${psd} policy/protocols/smtp/software.bro)
rest_target(${psd} policy/protocols/ssh/detect-bruteforcing.bro)
rest_target(${psd} policy/protocols/ssh/geo-data.bro)
rest_target(${psd} policy/protocols/ssh/interesting-hostnames.bro)
rest_target(${psd} policy/protocols/ssh/software.bro)
rest_target(${psd} policy/protocols/ssl/cert-hash.bro)
rest_target(${psd} policy/protocols/ssl/expiring-certs.bro)
rest_target(${psd} policy/protocols/ssl/extract-certs-pem.bro)
rest_target(${psd} policy/protocols/ssl/known-certs.bro)
rest_target(${psd} policy/protocols/ssl/validate-certs.bro)
rest_target(${psd} policy/tuning/defaults/packet-fragments.bro)
rest_target(${psd} policy/tuning/defaults/warnings.bro)
rest_target(${psd} policy/tuning/track-all-assets.bro)
rest_target(${psd} site/local-manager.bro)
rest_target(${psd} site/local-proxy.bro)
rest_target(${psd} site/local-worker.bro)
rest_target(${psd} site/local.bro)
rest_target(${psd} test-all-policy.bro)

INSTALL (56 changed lines)

@@ -5,34 +5,44 @@ Installing Bro
 Prerequisites
 =============
 
-Bro relies on the following libraries and tools, which need to be installed
+Bro requires the following libraries and tools to be installed
 before you begin:
 
     * CMake 2.6.3 or greater            http://www.cmake.org
 
-    * Libpcap (headers and libraries)   http://www.tcpdump.org
+    * Perl (used only during the Bro build process)
 
-    * OpenSSL (headers and libraries)   http://www.openssl.org
+    * Libpcap headers and libraries     http://www.tcpdump.org
 
-    * SWIG                              http://www.swig.org
+    * OpenSSL headers and libraries     http://www.openssl.org
+
+    * BIND8 headers and libraries
 
     * Libmagic
 
     * Libz
 
-Bro can make uses of some optional libraries if they are found at
-installation time:
+    * SWIG                              http://www.swig.org
 
-    * LibGeoIP                          For geo-locating IP addresses.
-
-Bro also needs the following tools, but on most systems they will
-already come preinstalled:
-
-    * Bash (For Bro Control).
-    * BIND8 (headers and libraries)
     * Bison (GNU Parser Generator)
+
     * Flex (Fast Lexical Analyzer)
-    * Perl (Used only during the Bro build process)
+
+    * Bash (for BroControl)
+
+Bro can make use of some optional libraries and tools if they are found at
+build time:
+
+    * LibGeoIP (for geo-locating IP addresses)
+
+    * gperftools (tcmalloc is used to improve memory and CPU usage)
+
+    * sendmail (for BroControl)
+
+    * ipsumdump (for trace-summary)     http://www.cs.ucla.edu/~kohler/ipsumdump
+
+    * Ruby executable, library, and headers (for Broccoli Ruby bindings)
 
 
 Installation
@@ -44,7 +54,7 @@ To build and install into ``/usr/local/bro``::
     make
     make install
 
-This will first build Bro into a directory inside the distribution
+This will first build Bro in a directory inside the distribution
 called ``build/``, using default build options. It then installs all
 required files into ``/usr/local/bro``, including the Bro binary in
 ``/usr/local/bro/bin/bro``.
@@ -60,22 +70,22 @@ choices unless you are creating such a package.
 
 Run ``./configure --help`` for more options.
 
 Depending on the Bro package you downloaded, there may be auxiliary
-tools and libraries available in the ``aux/`` directory. All of them
-except for ``aux/bro-aux`` will also be built and installed by doing
-``make install``. To install the programs that come in the
-``aux/bro-aux`` directory, use ``make install-aux``. There are
+tools and libraries available in the ``aux/`` directory. Some of them
+will be automatically built and installed along with Bro. There are
 ``--disable-*`` options that can be given to the configure script to
-turn off unwanted auxiliary projects.
+turn off unwanted auxiliary projects that would otherwise be installed
+automatically. Finally, use ``make install-aux`` to install some of
+the other programs that are in the ``aux/bro-aux`` directory.
 
-OpenBSD users, please see our `FAQ
-<http://www.bro-ids.org/documentation/faq.html>` if you are having
+OpenBSD users, please see our FAQ at
+http://www.bro-ids.org/documentation/faq.html if you are having
 problems installing Bro.
 
 Running Bro
 ===========
 
 Bro is a complex program and it takes a bit of time to get familiar
-with it. A good place for newcomers to start is the Quickstart Guide
+with it. A good place for newcomers to start is the Quick Start Guide
 at http://www.bro-ids.org/documentation/quickstart.html.
 
 For developers that wish to run Bro directly from the ``build/``

NEWS (170 changed lines)

@@ -3,18 +3,139 @@ Release Notes
 =============
 
 This document summarizes the most important changes in the current Bro
-release. For a complete list of changes, see the ``CHANGES`` file.
+release. For a complete list of changes, see the ``CHANGES`` file
+(note that submodules, such as BroControl and Broccoli, come with
+their own CHANGES.)
+
+Bro 2.2
+-------
+
+New Functionality
+~~~~~~~~~~~~~~~~~
+
+- TODO: Update.
+
+Changed Functionality
+~~~~~~~~~~~~~~~~~~~~~
+
+- We removed the following, already deprecated, functionality:
+
+  * Scripting language:
+    - &disable_print_hook attribute.
+
+  * BiF functions:
+    - parse_dotted_addr(), dump_config(),
+      make_connection_persistent(), generate_idmef(),
+      split_complete()
+
+- "this" is no longer a reserved keyword.
+
 Bro 2.1
 -------
 
-- Dependencies:
+New Functionality
+~~~~~~~~~~~~~~~~~
+
+- Bro now comes with extensive IPv6 support. Past versions offered
+  only basic IPv6 functionality that was rarely used in practice as it
+  had to be enabled explicitly. IPv6 support is now fully integrated
+  into all parts of Bro including protocol analysis and the scripting
+  language. It's on by default and no longer requires any special
+  configuration.
+
+  Some of the most significant enhancements include support for IPv6
+  fragment reassembly, support for following IPv6 extension header
+  chains, and support for tunnel decapsulation (6to4 and Teredo). The
+  DNS analyzer now handles AAAA records properly, and DNS lookups that
+  Bro itself performs now include AAAA queries, so that, for example,
+  the result returned by script-level lookups is a set that can
+  contain both IPv4 and IPv6 addresses. Support for the most common
+  ICMPv6 message types has been added. Also, the FTP EPSV and EPRT
+  commands are now handled properly. Internally, the way IP addresses
+  are stored has been improved, so Bro can handle both IPv4
+  and IPv6 by default without any special configuration.
+
+  In addition to Bro itself, the other Bro components have also been
+  made IPv6-aware by default. In particular, significant changes were
+  made to trace-summary, PySubnetTree, and Broccoli to support IPv6.
+
+- Bro now decapsulates tunnels via its new tunnel framework located in
+  scripts/base/frameworks/tunnels. It currently supports Teredo,
+  AYIYA, IP-in-IP (both IPv4 and IPv6), and SOCKS. For all these, it
+  logs the outer tunnel connections in both conn.log and tunnel.log,
+  and then proceeds to analyze the inner payload as if it were not
+  tunneled, including also logging that session in conn.log. For
+  SOCKS, it generates a new socks.log in addition with more
+  information.
+
+- Bro now features a flexible input framework that allows users to
+  integrate external information in real-time into Bro while it's
+  processing network traffic. The most direct use-case at the moment
+  is reading data from ASCII files into Bro tables, with updates
+  picked up automatically when the file changes during runtime. See
+  doc/input.rst for more information.
+
+  Internally, the input framework is structured around the notion of
+  "reader plugins" that make it easy to interface to different data
+  sources. We will add more in the future.
+
+- BroControl now has built-in support for host-based load-balancing
+  when using either PF_RING, Myricom cards, or individual interfaces.
+  Instead of adding a separate worker entry in node.cfg for each Bro
+  worker process on each worker host, it is now possible to just
+  specify the number of worker processes on each host and BroControl
+  configures everything correctly (including any necessary environment
+  variables for the balancers).
+
+  This change adds three new keywords to the node.cfg file (to be used
+  with worker entries): lb_procs (specifies number of workers on a
+  host), lb_method (specifies what type of load balancing to use:
+  pf_ring, myricom, or interfaces), and lb_interfaces (used only with
+  "lb_method=interfaces" to specify which interfaces to load-balance
+  on).
+
+- Bro's default ASCII log format is not exactly the most efficient way
+  for storing and searching large volumes of data. As an alternative,
+  Bro now comes with experimental support for two alternative output
+  formats:
+
+  * DataSeries: an efficient binary format for recording structured
+    bulk data. DataSeries is developed and maintained at HP Labs.
+    See doc/logging-dataseries for more information.
+
+  * ElasticSearch: a distributed, RESTful storage and search engine
+    built on top of Apache Lucene. It scales very well, both
+    for distributed indexing and distributed searching. See
+    doc/logging-elasticsearch.rst for more information.
+
+  Note that at this point, we consider Bro's support for these two
+  formats as prototypes for collecting experience with alternative
+  outputs. We do not yet recommend them for production (but welcome
+  feedback!)
+
+Changed Functionality
+~~~~~~~~~~~~~~~~~~~~~
+
+The following summarizes the most important differences in existing
+functionality. Note that this list is not complete, see CHANGES for
+the full set.
+
+- Changes in dependencies:
 
   * Bro now requires CMake >= 2.6.3.
 
-- Bro now supports IPv6 out of the box; the configure switch
-  --enable-brov6 is gone.
+  * On Linux, Bro now links in tcmalloc (part of Google perftools)
+    if found at configure time. Doing so can significantly improve
+    memory and CPU use.
+
+    On the other platforms, the new configure option
+    --enable-perftools can be used to enable linking to tcmalloc.
+    (Note that perftools's support for non-Linux platforms may be
+    less reliable).
+
+- The configure switch --enable-brov6 is gone.
 
 - DNS name lookups performed by Bro now also query AAAA records. The
   results of the A and AAAA queries for a given hostname are combined
@@ -31,7 +152,44 @@ Bro 2.1
 - The syntax for IPv6 literals changed from "2607:f8b0:4009:802::1012"
   to "[2607:f8b0:4009:802::1012]".
 
-TODO: Extend.
+- Bro now spawns threads for doing its logging. From a user's
+  perspective not much should change, except that the OS may now show
+  a bunch of Bro threads.
+
+- We renamed the configure option --enable-perftools to
+  --enable-perftools-debug to indicate that the switch is only relevant
+  for debugging the heap.
+
+- Bro's ICMP analyzer now handles both IPv4 and IPv6 messages with a
+  joint set of events. The `icmp_conn` record got a new boolean field
+  'v6' that indicates whether the ICMP message is v4 or v6.
+
+- Log postprocessor scripts get an additional argument indicating the
+  type of the log writer in use (e.g., "ascii").
+
+- BroControl's make-archive-name script also receives the writer
+  type, but as its 2nd(!) argument. If you're using a custom version
+  of that script, you need to adapt it. See the shipped version for
+  details.
+
+- Signature files can now be loaded via the new "@load-sigs"
+  directive. In contrast to the existing (and still supported)
+  signature_files constant, this can be used to load signatures
+  relative to the current script (e.g., "@load-sigs ./foo.sig").
+
+- The options "tunnel_port" and "parse_udp_tunnels" have been removed.
+  Bro now supports decapsulating tunnels directly for protocols it
+  understands.
+
+- ASCII logs now record the time when they were opened/closed at the
+  beginning and end of the file, respectively (wall clock). The
+  options LogAscii::header_prefix and LogAscii::include_header have
+  been renamed to LogAscii::meta_prefix and LogAscii::include_meta,
+  respectively.
+
+- The ASCII writer's "header_*" options have been renamed to "meta_*"
+  (because there's now also a footer).
 
 Bro 2.0
 -------
@@ -64,7 +222,7 @@ final release are:
   ASCII logger now respects to add a suffix to the log files it
   creates.
 
 * The ASCII logs now include further header information, and
   fields set to an empty value are now logged as ``(empty)`` by
   default (instead of ``-``, which is already used for fields that
   are not set at all).

VERSION

@@ -1 +1 @@
-2.0-150
+2.1-39

config.h.in

@@ -109,7 +109,19 @@
 #cmakedefine HAVE_GEOIP_CITY_EDITION_REV0_V6
 
 /* Use Google's perftools */
-#cmakedefine USE_PERFTOOLS
+#cmakedefine USE_PERFTOOLS_DEBUG
+
+/* Analyze Mobile IPv6 traffic */
+#cmakedefine ENABLE_MOBILE_IPV6
+
+/* Use libCurl. */
+#cmakedefine USE_CURL
+
+/* Use the DataSeries writer. */
+#cmakedefine USE_DATASERIES
+
+/* Use the ElasticSearch writer. */
+#cmakedefine USE_ELASTICSEARCH
 
 /* Version number of package */
 #define VERSION "@VERSION@"
 
@@ -149,3 +161,51 @@
 #ifndef HAVE_DLT_PPP_SERIAL
 #define DLT_PPP_SERIAL @DLT_PPP_SERIAL@
 #endif
/* IPv6 Next Header values defined by RFC 3542 */
#cmakedefine HAVE_IPPROTO_HOPOPTS
#ifndef HAVE_IPPROTO_HOPOPTS
#define IPPROTO_HOPOPTS 0
#endif
#cmakedefine HAVE_IPPROTO_IPV6
#ifndef HAVE_IPPROTO_IPV6
#define IPPROTO_IPV6 41
#endif
#cmakedefine HAVE_IPPROTO_IPV4
#ifndef HAVE_IPPROTO_IPV4
#define IPPROTO_IPV4 4
#endif
#cmakedefine HAVE_IPPROTO_ROUTING
#ifndef HAVE_IPPROTO_ROUTING
#define IPPROTO_ROUTING 43
#endif
#cmakedefine HAVE_IPPROTO_FRAGMENT
#ifndef HAVE_IPPROTO_FRAGMENT
#define IPPROTO_FRAGMENT 44
#endif
#cmakedefine HAVE_IPPROTO_ESP
#ifndef HAVE_IPPROTO_ESP
#define IPPROTO_ESP 50
#endif
#cmakedefine HAVE_IPPROTO_AH
#ifndef HAVE_IPPROTO_AH
#define IPPROTO_AH 51
#endif
#cmakedefine HAVE_IPPROTO_ICMPV6
#ifndef HAVE_IPPROTO_ICMPV6
#define IPPROTO_ICMPV6 58
#endif
#cmakedefine HAVE_IPPROTO_NONE
#ifndef HAVE_IPPROTO_NONE
#define IPPROTO_NONE 59
#endif
#cmakedefine HAVE_IPPROTO_DSTOPTS
#ifndef HAVE_IPPROTO_DSTOPTS
#define IPPROTO_DSTOPTS 60
#endif
/* IPv6 options structure defined by RFC 3542 */
#cmakedefine HAVE_IP6_OPT
/* Common IPv6 extension structure */
#cmakedefine HAVE_IP6_EXT

configure (60 changed lines)

@@ -1,7 +1,7 @@
 #!/bin/sh
 # Convenience wrapper for easily viewing/setting options that
 # the project's CMake scripts will recognize
+set -e
 
 command="$0 $*"
 
 # check for `cmake` command
@@ -24,13 +24,18 @@ Usage: $0 [OPTION]... [VAR=VALUE]...
   --prefix=PREFIX            installation directory [/usr/local/bro]
   --scriptdir=PATH           root installation directory for Bro scripts
                              [PREFIX/share/bro]
+  --conf-files-dir=PATH      config files installation directory [PREFIX/etc]
 
   Optional Features:
     --enable-debug           compile in debugging mode
-    --enable-perftools       use Google's perftools
+    --enable-mobile-ipv6     analyze mobile IPv6 features defined by RFC 6275
+    --enable-perftools       force use of Google perftools on non-Linux systems
+                             (automatically on when perftools is present on Linux)
+    --enable-perftools-debug use Google's perftools for debugging
     --disable-broccoli       don't build or install the Broccoli library
     --disable-broctl         don't install Broctl
-    --disable-auxtools       don't build or install auxilliary tools
+    --disable-auxtools       don't build or install auxiliary tools
+    --disable-perftools      don't try to build with Google Perftools
     --disable-python         don't try to build python bindings for broccoli
     --disable-ruby           don't try to build ruby bindings for broccoli
@@ -54,6 +59,8 @@ Usage: $0 [OPTION]... [VAR=VALUE]...
     --with-ruby-lib=PATH     path to ruby library
     --with-ruby-inc=PATH     path to ruby headers
     --with-swig=PATH         path to SWIG executable
+    --with-dataseries=PATH   path to DataSeries and Lintel libraries
+    --with-xml2=PATH         path to libxml2 installation (for DataSeries)
 
 Packaging Options (for developers):
   --binary-package           toggle special logic for binary packaging
@@ -85,19 +92,24 @@ append_cache_entry () {
 # set defaults
 builddir=build
+prefix=/usr/local/bro
 CMakeCacheEntries=""
-append_cache_entry CMAKE_INSTALL_PREFIX     PATH   /usr/local/bro
-append_cache_entry BRO_ROOT_DIR             PATH   /usr/local/bro
-append_cache_entry PY_MOD_INSTALL_DIR       PATH   /usr/local/bro/lib/broctl
-append_cache_entry BRO_SCRIPT_INSTALL_PATH  STRING /usr/local/bro/share/bro
+append_cache_entry CMAKE_INSTALL_PREFIX     PATH   $prefix
+append_cache_entry BRO_ROOT_DIR             PATH   $prefix
+append_cache_entry PY_MOD_INSTALL_DIR       PATH   $prefix/lib/broctl
+append_cache_entry BRO_SCRIPT_INSTALL_PATH  STRING $prefix/share/bro
+append_cache_entry BRO_ETC_INSTALL_DIR      PATH   $prefix/etc
 append_cache_entry ENABLE_DEBUG             BOOL   false
 append_cache_entry ENABLE_PERFTOOLS         BOOL   false
+append_cache_entry ENABLE_PERFTOOLS_DEBUG   BOOL   false
 append_cache_entry BinPAC_SKIP_INSTALL      BOOL   true
 append_cache_entry BUILD_SHARED_LIBS        BOOL   true
 append_cache_entry INSTALL_AUX_TOOLS        BOOL   true
 append_cache_entry INSTALL_BROCCOLI         BOOL   true
 append_cache_entry INSTALL_BROCTL           BOOL   true
 append_cache_entry CPACK_SOURCE_IGNORE_FILES STRING
+append_cache_entry ENABLE_MOBILE_IPV6       BOOL   false
+append_cache_entry DISABLE_PERFTOOLS        BOOL   false
 
 # parse arguments
 while [ $# -ne 0 ]; do
@@ -118,23 +130,32 @@ while [ $# -ne 0 ]; do
            CMakeGenerator="$optarg"
            ;;
        --prefix=*)
+           prefix=$optarg
            append_cache_entry CMAKE_INSTALL_PREFIX PATH   $optarg
            append_cache_entry BRO_ROOT_DIR         PATH   $optarg
            append_cache_entry PY_MOD_INSTALL_DIR   PATH   $optarg/lib/broctl
-           if [ "$user_set_scriptdir" != "true" ]; then
-               append_cache_entry BRO_SCRIPT_INSTALL_PATH STRING $optarg/share/bro
-           fi
            ;;
        --scriptdir=*)
            append_cache_entry BRO_SCRIPT_INSTALL_PATH STRING $optarg
            user_set_scriptdir="true"
            ;;
+       --conf-files-dir=*)
+           append_cache_entry BRO_ETC_INSTALL_DIR PATH $optarg
+           user_set_conffilesdir="true"
+           ;;
        --enable-debug)
            append_cache_entry ENABLE_DEBUG BOOL true
            ;;
+       --enable-mobile-ipv6)
+           append_cache_entry ENABLE_MOBILE_IPV6 BOOL true
+           ;;
        --enable-perftools)
            append_cache_entry ENABLE_PERFTOOLS BOOL true
            ;;
+       --enable-perftools-debug)
+           append_cache_entry ENABLE_PERFTOOLS BOOL true
+           append_cache_entry ENABLE_PERFTOOLS_DEBUG BOOL true
+           ;;
        --disable-broccoli)
            append_cache_entry INSTALL_BROCCOLI BOOL false
            ;;
@@ -144,6 +165,9 @@ while [ $# -ne 0 ]; do
        --disable-auxtools)
            append_cache_entry INSTALL_AUX_TOOLS BOOL false
            ;;
+       --disable-perftools)
+           append_cache_entry DISABLE_PERFTOOLS BOOL true
+           ;;
        --disable-python)
            append_cache_entry DISABLE_PYTHON_BINDINGS BOOL true
            ;;
@@ -178,7 +202,6 @@ while [ $# -ne 0 ]; do
            append_cache_entry LibGeoIP_ROOT_DIR PATH $optarg
            ;;
        --with-perftools=*)
-           append_cache_entry ENABLE_PERFTOOLS BOOL true
            append_cache_entry GooglePerftools_ROOT_DIR PATH $optarg
            ;;
        --with-python=*)
@@ -204,6 +227,13 @@ while [ $# -ne 0 ]; do
        --with-swig=*)
            append_cache_entry SWIG_EXECUTABLE PATH $optarg
            ;;
+       --with-dataseries=*)
+           append_cache_entry DataSeries_ROOT_DIR PATH $optarg
+           append_cache_entry Lintel_ROOT_DIR PATH $optarg
+           ;;
+       --with-xml2=*)
+           append_cache_entry LibXML2_ROOT_DIR PATH $optarg
+           ;;
        --binary-package)
            append_cache_entry BINARY_PACKAGING_MODE BOOL true
            ;;
@@ -227,6 +257,14 @@ while [ $# -ne 0 ]; do
    shift
 done
 
+if [ "$user_set_scriptdir" != "true" ]; then
+    append_cache_entry BRO_SCRIPT_INSTALL_PATH STRING $prefix/share/bro
+fi
+
+if [ "$user_set_conffilesdir" != "true" ]; then
+    append_cache_entry BRO_ETC_INSTALL_DIR PATH $prefix/etc
+fi
+
 if [ -d $builddir ]; then
     # If build directory exists, check if it has a CMake cache
     if [ -f $builddir/CMakeCache.txt ]; then


@@ -29,7 +29,7 @@ class BroLexer(RegexLexer):
              r'|vector)\b', Keyword.Type),
             (r'(T|F)\b', Keyword.Constant),
             (r'(&)((?:add|delete|expire)_func|attr|(create|read|write)_expire'
-             r'|default|disable_print_hook|raw_output|encrypt|group|log'
+             r'|default|raw_output|encrypt|group|log'
              r'|mergeable|optional|persistent|priority|redef'
              r'|rotate_(?:interval|size)|synchronized)\b', bygroups(Punctuation,
                                                                     Keyword)),

Binary file not shown.


@@ -46,7 +46,7 @@ directions:
 http://securityonion.blogspot.com/2011/10/when-is-full-packet-capture-not-full.html
 
 What does an error message like ``internal error: NB-DNS error`` mean?
----------------------------------------------------------------------------------------------------------------------------------
+----------------------------------------------------------------------
 
 That often means that DNS is not set up correctly on the system
 running Bro. Try verifying from the command line that DNS lookups
 
@@ -65,6 +65,15 @@ Generally, please note that we do not regularly test OpenBSD builds.
 We appreciate any patches that improve Bro's support for this
 platform.
 
How do BroControl options affect Bro script variables?
------------------------------------------------------
Some (but not all) BroControl options override a corresponding Bro script variable.
For example, setting the BroControl option "LogRotationInterval" will override
the value of the Bro script variable "Log::default_rotation_interval".
See the :doc:`BroControl Documentation <components/broctl/README>` to find out
which BroControl options override Bro script variables, and for more discussion
on site-specific customization.
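
For example, a minimal sketch of the equivalent Bro-side setting (the value
shown is illustrative):

.. code:: bro

    # Has the same effect as the BroControl option "LogRotationInterval = 3600":
    redef Log::default_rotation_interval = 3600 secs;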

Usage
=====

@@ -24,6 +24,7 @@ Frameworks
    notice
    logging
+   input
    cluster
    signatures

doc/input.rst (new file, 407 lines)

@ -0,0 +1,407 @@
==============================================
Loading Data into Bro with the Input Framework
==============================================
.. rst-class:: opening
Bro now features a flexible input framework that allows users
to import data into Bro. Data is either read into Bro tables or
converted to events which can then be handled by scripts.
This document gives an overview of how to use the input framework
with some examples. For more complex scenarios it is
worthwhile to take a look at the unit tests in
``testing/btest/scripts/base/frameworks/input/``.
.. contents::
Reading Data into Tables
========================
Probably the most interesting use-case of the input framework is to
read data into a Bro table.
By default, the input framework reads the data in the same format
as it is written by the logging framework in Bro - a tab-separated
ASCII file.
We will show the ways to read files into Bro with a simple example.
For this example we assume that we want to import data from a blacklist
that contains server IP addresses as well as the timestamp and the reason
for the block.
An example input file could look like this:
::
#fields ip timestamp reason
192.168.17.1 1333252748 Malware host
192.168.27.2 1330235733 Botnet server
192.168.250.3 1333145108 Virus detected
To read a file into a Bro table, two record types have to be defined.
One contains the types and names of the columns that should constitute the
table keys and the second contains the types and names of the columns that
should constitute the table values.
In our case, we want to be able to lookup IPs. Hence, our key record
only contains the server IP. All other elements should be stored as
the table content.
The two records are defined as:
.. code:: bro
type Idx: record {
ip: addr;
};
type Val: record {
timestamp: time;
reason: string;
};
Note that the names of the fields in the record definitions have to correspond
to the column names listed in the '#fields' line of the log file, in this
case 'ip', 'timestamp', and 'reason'.
The log file is read into the table with a simple call to the ``add_table``
function:
.. code:: bro
global blacklist: table[addr] of Val = table();
Input::add_table([$source="blacklist.file", $name="blacklist", $idx=Idx, $val=Val, $destination=blacklist]);
Input::remove("blacklist");
With these three lines we first create an empty table that should contain the
blacklist data and then instruct the input framework to open an input stream
named ``blacklist`` to read the data into the table. The third line removes the
input stream again, because we do not need it any more after the data has been
read.
Because some data files can - potentially - be rather big, the input framework
works asynchronously. A new thread is created for each new input stream.
This thread opens the input data file, converts the data into a Bro format and
sends it back to the main Bro thread.
Because of this, the data is not immediately accessible. Depending on the
size of the data source it might take from a few milliseconds up to a few
seconds until all data is present in the table. Please note that this means
that when Bro is running without an input source or on very short captured
files, it might terminate before the data is present in the system (because
Bro already handled all packets before the import thread finished).
Subsequent calls to an input source are queued until the previous action has
been completed. Because of this, it is, for example, possible to call
``add_table`` and ``remove`` in two subsequent lines: the ``remove`` action
will remain queued until the first read has been completed.
Once the input framework finishes reading from a data source, it fires
the ``update_finished`` event. Once this event has been received all data
from the input file is available in the table.
.. code:: bro
event Input::update_finished(name: string, source: string) {
# now all data is in the table
print blacklist;
}
The table can also already be used while the data is still being read - it
just might not contain all lines in the input file when the event has not
yet fired. After it has been populated it can be used like any other Bro
table and blacklist entries can easily be tested:
.. code:: bro
if ( 192.168.18.12 in blacklist )
# take action
Re-reading and streaming data
-----------------------------
For many data sources, like for many blacklists, the source data is continually
changing. For these cases, the Bro input framework supports several ways to
deal with changing data files.
The first, very basic method is an explicit refresh of an input stream. When
an input stream is open, the function ``force_update`` can be called. This
will trigger a complete refresh of the table; any changed elements from the
file will be updated. After the update is finished the ``update_finished``
event will be raised.
In our example the call would look like:
.. code:: bro
Input::force_update("blacklist");
The input framework also supports two automatic refresh modes. The first mode
continually checks if a file has been changed. If the file has been changed, it
is re-read and the data in the Bro table is updated to reflect the current
state. Each time a change has been detected and all the new data has been
read into the table, the ``update_finished`` event is raised.
The second mode is a streaming mode. This mode assumes that the source data
file is an append-only file to which new data is continually appended. Bro
continually checks for new data at the end of the file and will add the new
data to the table. If newer lines in the file have the same index as previous
lines, they will overwrite the values in the output table. Because of the
nature of streaming reads (data is continually added to the table),
the ``update_finished`` event is never raised when using streaming reads.
The reading mode can be selected by setting the ``mode`` option of the
add_table call. Valid values are ``MANUAL`` (the default), ``REREAD``
and ``STREAM``.
Hence, when adding ``$mode=Input::REREAD`` to the previous example, the
blacklist table will always reflect the state of the blacklist input file.
.. code:: bro
Input::add_table([$source="blacklist.file", $name="blacklist", $idx=Idx, $val=Val, $destination=blacklist, $mode=Input::REREAD]);
Receiving change events
-----------------------
When re-reading files, it might be interesting to know exactly which lines in
the source files have changed.
For this reason, the input framework can raise an event each time a data
item is added to, removed from, or changed in a table.
The event definition looks like this:
.. code:: bro
event entry(description: Input::TableDescription, tpe: Input::Event, left: Idx, right: Val) {
# act on values
}
The event has to be specified in ``$ev`` in the ``add_table`` call:
.. code:: bro
Input::add_table([$source="blacklist.file", $name="blacklist", $idx=Idx, $val=Val, $destination=blacklist, $mode=Input::REREAD, $ev=entry]);
The ``description`` field of the event contains the arguments that were
originally supplied to the add_table call. Hence, the name of the stream can,
for example, be accessed with ``description$name``. ``tpe`` is an enum
containing the type of the change that occurred.
If a line that was not previously present in the table has been added,
then ``tpe`` will contain ``Input::EVENT_NEW``. In this case ``left`` contains
the index of the added table entry and ``right`` contains the values of the
added entry.
If a table entry that already was present is altered during the re-reading or
streaming read of a file, ``tpe`` will contain ``Input::EVENT_CHANGED``. In
this case ``left`` contains the index of the changed table entry and ``right``
contains the values of the entry before the change. The reason for this is
that the table already has been updated when the event is raised. The current
value in the table can be ascertained by looking up the current table value.
Hence it is possible to compare the new and the old values of the table.
If a table element is removed because it was no longer present during a
re-read, then ``tpe`` will contain ``Input::EVENT_REMOVED``. In this case ``left``
contains the index and ``right`` the values of the removed element.
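
As a sketch of how this can be used (reusing the ``blacklist`` table and the
``Idx``/``Val`` records defined earlier; the ``fmt`` output is just for
illustration), a handler might compare old and new values like this:

.. code:: bro

    event entry(description: Input::TableDescription, tpe: Input::Event,
                left: Idx, right: Val)
        {
        if ( tpe == Input::EVENT_CHANGED )
            # "right" holds the pre-change values; the table already
            # contains the new ones, so look those up for the comparison.
            print fmt("%s changed: reason was '%s', is now '%s'",
                      left$ip, right$reason, blacklist[left$ip]$reason);
        }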
Filtering data during import
----------------------------
The input framework also allows a user to filter the data during the import.
To this end, predicate functions are used. A predicate function is called
before a new element is added/changed/removed from a table. The predicate
can either accept or veto the change by returning true for an accepted
change and false for a rejected change. Furthermore, it can alter the data
before it is written to the table.
The following example predicate will reject adding entries to the table when
they were generated more than a month ago. It will accept all changes and all
removals of values that are already present in the table.
.. code:: bro
Input::add_table([$source="blacklist.file", $name="blacklist", $idx=Idx, $val=Val, $destination=blacklist, $mode=Input::REREAD,
$pred(typ: Input::Event, left: Idx, right: Val) = {
if ( typ != Input::EVENT_NEW ) {
return T;
}
return ( ( current_time() - right$timestamp ) < (30 day) );
}]);
To change elements while they are being imported, the predicate function can
manipulate ``left`` and ``right``. Note that predicate functions are called
before the change is committed to the table. Hence, when a table element is
changed (``tpe`` is ``Input::EVENT_CHANGED``), ``left`` and ``right``
contain the new values, but the destination (``blacklist`` in our example)
still contains the old values. This allows predicate functions to examine
the changes between the old and the new version before deciding if they
should be allowed.
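
For illustration, a minimal sketch that rewrites a field before it is
committed (``to_lower`` is a built-in Bro string function):

.. code:: bro

    Input::add_table([$source="blacklist.file", $name="blacklist",
                      $idx=Idx, $val=Val, $destination=blacklist,
                      $mode=Input::REREAD,
                      $pred(typ: Input::Event, left: Idx, right: Val) = {
                          # Normalize the reason text before it reaches the table.
                          right$reason = to_lower(right$reason);
                          return T;
                      }]);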
Different readers
-----------------
The input framework supports different kinds of readers for different kinds
of source data files. At the moment, the default reader reads ASCII files
formatted in the Bro log file format (tab-separated values). At the moment,
Bro comes with two other readers. The ``RAW`` reader reads a file that is
split by a specified record separator (usually newline). The contents are
returned line-by-line as strings; it can, for example, be used to read
configuration files and the like and is probably
only useful in the event mode and not for reading data to tables.
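
A minimal sketch of such a use (the ``Line`` record, its field name, and the
event name are illustrative; ``Input::READER_RAW`` selects the reader):

.. code:: bro

    type Line: record {
        s: string;
    };

    event raw_line(description: Input::EventDescription, tpe: Input::Event,
                   s: string)
        {
        print fmt("read line: %s", s);
        }

    event bro_init()
        {
        Input::add_event([$source="config.file", $name="raw-config",
                          $reader=Input::READER_RAW, $fields=Line,
                          $ev=raw_line]);
        }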
Another included reader is the ``BENCHMARK`` reader, which is being used
to optimize the speed of the input framework. It can generate arbitrary
amounts of semi-random data in all Bro data types supported by the input
framework.
In the future, the input framework will get support for new data sources
like, for example, different databases.
Add_table options
-----------------
This section lists all possible options that can be used for the add_table
function and gives a short explanation of their use. Most of the options
already have been discussed in the previous sections.
The possible fields that can be set for a table stream are:
``source``
A mandatory string identifying the source of the data.
For the ASCII reader this is the filename.
``name``
A mandatory name for the stream that can later be used
to manipulate it further.
``idx``
Record type that defines the index of the table.
``val``
Record type that defines the values of the table.
``reader``
The reader used for this stream. Default is ``READER_ASCII``.
``mode``
The mode in which the stream is opened. Possible values are
``MANUAL``, ``REREAD`` and ``STREAM``. Default is ``MANUAL``.
``MANUAL`` means that the file is not updated after it has
been read. Changes to the file will not be reflected in the
data Bro knows. ``REREAD`` means that the whole file is read
again each time a change is found. This should be used for
files that are mapped to a table where individual lines can
change. ``STREAM`` means that the data from the file is
streamed. Events / table entries will be generated as new
data is appended to the file.
``destination``
The destination table.
``ev``
Optional event that is raised when values are added to,
changed in, or deleted from the table. Events are passed the
stream description as the first argument, an Input::Event
as the second argument, the index record as the third
argument, and the values as the fourth argument.
``pred``
Optional predicate that can prevent entries from being added
to the table and events from being sent.
``want_record``
Boolean value that defines whether the event receives the
fields inside of a single record value or individually
(the default). This can be used if ``val`` is a record
containing only one type. In this case, if ``want_record`` is
set to false, the table will contain elements of the type
contained in ``val``; see the sketch after this list.
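
A minimal sketch of that single-field case (the ``ReasonVal`` record and the
``reasons`` table are hypothetical names for illustration):

.. code:: bro

    type ReasonVal: record {
        reason: string;
    };

    global reasons: table[addr] of string = table();

    # With a one-field val record and want_record=F, the table's values are
    # the bare field type (string), not records.
    Input::add_table([$source="blacklist.file", $name="reasons",
                      $idx=Idx, $val=ReasonVal, $destination=reasons,
                      $want_record=F]);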
Reading Data to Events
======================
The second supported mode of the input framework is reading data to Bro
events instead of reading them to a table using event streams.
Event streams work very similarly to table streams that were already
discussed in much detail. To read the blacklist of the previous example
into an event stream, the following Bro code could be used:
.. code:: bro
type Val: record {
ip: addr;
timestamp: time;
reason: string;
};
event blacklistentry(description: Input::EventDescription, tpe: Input::Event, ip: addr, timestamp: time, reason: string) {
# work with event data
}
event bro_init() {
Input::add_event([$source="blacklist.file", $name="blacklist", $fields=Val, $ev=blacklistentry]);
}
The main difference in the declaration of an event stream is that an event
stream needs no separate index and value declarations -- instead, all source
data types are provided in a single record definition.
Apart from this, event streams work exactly the same as table streams and
support most of the options that are also supported for table streams.
The options that can be set when creating an event stream with
``add_event`` are:
``source``
A mandatory string identifying the source of the data.
For the ASCII reader this is the filename.
``name``
A mandatory name for the stream that can later be used
to remove it.
``fields``
Name of a record type containing the fields, which should be
retrieved from the input stream.
``ev``
The event which is fired after a line has been read from the
input source. The event receives the stream description as its
first argument and an Input::Event structure as its second,
followed by the data, either inside of a single record (if
``want_record`` is set) or as individual fields. The
Input::Event structure indicates whether the received line is
``NEW``, has been ``CHANGED`` or was ``DELETED``. Since the
ASCII reader cannot track this information for event streams,
the value is always ``NEW`` at the moment.
``mode``
The mode in which the stream is opened. Possible values are
``MANUAL``, ``REREAD`` and ``STREAM``. Default is ``MANUAL``.
``MANUAL`` means that the file is not updated after it has
been read. Changes to the file will not be reflected in the
data Bro knows. ``REREAD`` means that the whole file is read
again each time a change is found. This should be used for
files that are mapped to a table where individual lines can
change. ``STREAM`` means that the data from the file is
streamed. Events / table entries will be generated as new
data is appended to the file.
``reader``
The reader used for this stream. Default is ``READER_ASCII``.
``want_record``
Boolean value that defines whether the event receives the
fields inside of a single record value or individually
(the default). If this is set to true, the event will receive a
single record of the type provided in ``fields``, as sketched below.
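
A sketch of the record variant (reusing ``Val`` from the event-stream example
above; the handler and stream names are illustrative):

.. code:: bro

    event blacklistentry_rec(description: Input::EventDescription,
                             tpe: Input::Event, r: Val)
        {
        # The whole line arrives as a single record argument.
        print fmt("%s blacklisted: %s", r$ip, r$reason);
        }

    event bro_init()
        {
        Input::add_event([$source="blacklist.file", $name="blacklist-rec",
                          $fields=Val, $want_record=T, $ev=blacklistentry_rec]);
        }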

doc/logging-dataseries.rst (new file, 186 lines)

@ -0,0 +1,186 @@
=============================
Binary Output with DataSeries
=============================
.. rst-class:: opening
Bro's default ASCII log format is not exactly the most efficient
way for storing and searching large volumes of data. As an
alternative, Bro comes with experimental support for `DataSeries
<http://www.hpl.hp.com/techreports/2009/HPL-2009-323.html>`_
output, an efficient binary format for recording structured bulk
data. DataSeries is developed and maintained at HP Labs.
.. contents::
Installing DataSeries
---------------------
To use DataSeries, its libraries must be available at compile-time,
along with the supporting *Lintel* package. Generally, both are
distributed on `HP Labs' web site
<http://tesla.hpl.hp.com/opensource/>`_. Currently, however, you need
to use recent development versions for both packages, which you can
download from github like this::
git clone http://github.com/dataseries/Lintel
git clone http://github.com/dataseries/DataSeries
To build and install the two into ``<prefix>``, do::
( cd Lintel && mkdir build && cd build && cmake -DCMAKE_INSTALL_PREFIX=<prefix> .. && make && make install )
( cd DataSeries && mkdir build && cd build && cmake -DCMAKE_INSTALL_PREFIX=<prefix> .. && make && make install )
Please refer to the packages' documentation for more information about
the installation process. In particular, there's more information on
required and optional `dependencies for Lintel
<https://raw.github.com/dataseries/Lintel/master/doc/dependencies.txt>`_
and `dependencies for DataSeries
<https://raw.github.com/dataseries/DataSeries/master/doc/dependencies.txt>`_.
For users on RedHat-style systems, you'll need the following::
yum install libxml2-devel boost-devel
Compiling Bro with DataSeries Support
-------------------------------------
Once you have installed DataSeries, Bro's ``configure`` should pick it
up automatically as long as it finds it in a standard system location.
Alternatively, you can specify the DataSeries installation prefix
manually with ``--with-dataseries=<prefix>``. Keep an eye on
``configure``'s summary output; if it looks like the following, Bro
found DataSeries and will compile in the support::
# ./configure --with-dataseries=/usr/local
[...]
====================| Bro Build Summary |=====================
[...]
DataSeries: true
[...]
================================================================
Activating DataSeries
---------------------
The direct way to use DataSeries is to switch *all* log files over to
the binary format. To do that, just add ``redef
Log::default_writer=Log::WRITER_DATASERIES;`` to your ``local.bro``.
For testing, you can also just pass that on the command line::
bro -r trace.pcap Log::default_writer=Log::WRITER_DATASERIES
With that, Bro will now write all its output into DataSeries files
``*.ds``. You can inspect these using DataSeries's set of command line
tools, which its installation process installs into ``<prefix>/bin``.
For example, to convert a file back into an ASCII representation::
$ ds2txt conn.log
[... We skip a bunch of metadata here ...]
ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes
1300475167.096535 CRCC5OdDlXe 141.142.220.202 5353 224.0.0.251 5353 udp dns 0.000000 0 0 S0 F 0 D 1 73 0 0
1300475167.097012 o7XBsfvo3U1 fe80::217:f2ff:fed7:cf65 5353 ff02::fb 5353 udp 0.000000 0 0 S0 F 0 D 1 199 0 0
1300475167.099816 pXPi1kPMgxb 141.142.220.50 5353 224.0.0.251 5353 udp 0.000000 0 0 S0 F 0 D 1 179 0 0
1300475168.853899 R7sOc16woCj 141.142.220.118 43927 141.142.2.2 53 udp dns 0.000435 38 89 SF F 0 Dd 1 66 1 117
1300475168.854378 Z6dfHVmt0X7 141.142.220.118 37676 141.142.2.2 53 udp dns 0.000420 52 99 SF F 0 Dd 1 80 1 127
1300475168.854837 k6T92WxgNAh 141.142.220.118 40526 141.142.2.2 53 udp dns 0.000392 38 183 SF F 0 Dd 1 66 1 211
[...]
(``--skip-all`` suppresses the metadata.)
Note that the ASCII conversion is *not* equivalent to Bro's default
output format.
You can also switch only individual files over to DataSeries by adding
code like this to your ``local.bro``:
.. code:: bro
event bro_init()
{
local f = Log::get_filter(Conn::LOG, "default"); # Get default filter for connection log.
f$writer = Log::WRITER_DATASERIES; # Change writer type.
Log::add_filter(Conn::LOG, f); # Replace filter with adapted version.
}
Bro's DataSeries writer comes with a few tuning options, see
:doc:`scripts/base/frameworks/logging/writers/dataseries`.
Working with DataSeries
=======================
Here are a few examples of using DataSeries command line tools to work
with the output files.
* Printing CSV::
$ ds2txt --csv conn.log
ts,uid,id.orig_h,id.orig_p,id.resp_h,id.resp_p,proto,service,duration,orig_bytes,resp_bytes,conn_state,local_orig,missed_bytes,history,orig_pkts,orig_ip_bytes,resp_pkts,resp_ip_bytes
1258790493.773208,ZTtgbHvf4s3,192.168.1.104,137,192.168.1.255,137,udp,dns,3.748891,350,0,S0,F,0,D,7,546,0,0
1258790451.402091,pOY6Rw7lhUd,192.168.1.106,138,192.168.1.255,138,udp,,0.000000,0,0,S0,F,0,D,1,229,0,0
1258790493.787448,pn5IiEslca9,192.168.1.104,138,192.168.1.255,138,udp,,2.243339,348,0,S0,F,0,D,2,404,0,0
1258790615.268111,D9slyIu3hFj,192.168.1.106,137,192.168.1.255,137,udp,dns,3.764626,350,0,S0,F,0,D,7,546,0,0
[...]
Add ``--separator=X`` to set a different separator.
* Extracting a subset of columns::
$ ds2txt --select '*' ts,id.resp_h,id.resp_p --skip-all conn.log
1258790493.773208 192.168.1.255 137
1258790451.402091 192.168.1.255 138
1258790493.787448 192.168.1.255 138
1258790615.268111 192.168.1.255 137
1258790615.289842 192.168.1.255 138
[...]
* Filtering rows::
$ ds2txt --where '*' 'duration > 5 && id.resp_p > 1024' --skip-all conn.ds
1258790631.532888 V8mV5WLITu5 192.168.1.105 55890 239.255.255.250 1900 udp 15.004568 798 0 S0 F 0 D 6 966 0 0
1258792413.439596 tMcWVWQptvd 192.168.1.105 55890 239.255.255.250 1900 udp 15.004581 798 0 S0 F 0 D 6 966 0 0
1258794195.346127 cQwQMRdBrKa 192.168.1.105 55890 239.255.255.250 1900 udp 15.005071 798 0 S0 F 0 D 6 966 0 0
1258795977.253200 i8TEjhWd2W8 192.168.1.105 55890 239.255.255.250 1900 udp 15.004824 798 0 S0 F 0 D 6 966 0 0
1258797759.160217 MsLsBA8Ia49 192.168.1.105 55890 239.255.255.250 1900 udp 15.005078 798 0 S0 F 0 D 6 966 0 0
1258799541.068452 TsOxRWJRGwf 192.168.1.105 55890 239.255.255.250 1900 udp 15.004082 798 0 S0 F 0 D 6 966 0 0
[...]
* Calculating some statistics:
Mean/stddev/min/max over a column::
$ dsstatgroupby '*' basic duration from conn.ds
# Begin DSStatGroupByModule
# processed 2159 rows, where clause eliminated 0 rows
# count(*), mean(duration), stddev, min, max
2159, 42.7938, 1858.34, 0, 86370
[...]
Quantiles of total connection volume::
$ dsstatgroupby '*' quantile 'orig_bytes + resp_bytes' from conn.ds
[...]
2159 data points, mean 24616 +- 343295 [0,1.26615e+07]
quantiles about every 216 data points:
10%: 0, 124, 317, 348, 350, 350, 601, 798, 1469
tails: 90%: 1469, 95%: 7302, 99%: 242629, 99.5%: 1226262
[...]
The ``man`` pages for these tools show further options, and their
``-h`` option gives some more information (either can unfortunately
be a bit cryptic).
Deficiencies
------------
Due to limitations of the DataSeries format, one cannot inspect its
files before they have been fully written. In other words, when using
DataSeries, it's currently not possible to inspect the live log
files inside the spool directory before they are rotated to their
final location. It seems that this could be fixed with some effort,
and we will work with the DataSeries development team on that if the
format gains traction among Bro users.
Likewise, we're considering writing custom command line tools for
interacting with DataSeries files, making that a bit more convenient
than what the standard utilities provide.


@ -0,0 +1,89 @@
=========================================
Indexed Logging Output with ElasticSearch
=========================================
.. rst-class:: opening
Bro's default ASCII log format is not exactly the most efficient
way for searching large volumes of data. ElasticSearch
is a new data storage technology for dealing with tons of data.
It's also a search engine built on top of Apache's Lucene
project. It scales very well, both for distributed indexing and
distributed searching.
.. contents::
Warning
-------
This writer plugin is still in testing and is not yet recommended for
production use! The approach to how logs are handled in the plugin is "fire
and forget" at this time; there is no error handling if the server fails to
respond successfully to the insertion request.
Installing ElasticSearch
------------------------
Download the latest version from http://www.elasticsearch.org/download/.
Once extracted, start ElasticSearch with::
# ./bin/elasticsearch
For more detailed information, refer to the ElasticSearch installation
documentation: http://www.elasticsearch.org/guide/reference/setup/installation.html
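To verify that the server is running, you can query its REST interface
(it listens on port 9200 by default; the exact response format may vary
by version)::

    $ curl http://localhost:9200/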
Compiling Bro with ElasticSearch Support
----------------------------------------
First, ensure that you have libcurl installed, then run configure::
# ./configure
[...]
====================| Bro Build Summary |=====================
[...]
cURL: true
[...]
ElasticSearch: true
[...]
================================================================
Activating ElasticSearch
------------------------
The easiest way to enable ElasticSearch output is to load the
tuning/logs-to-elasticsearch.bro script. If you are using BroControl,
the following line in local.bro will enable it.
.. console::

    @load tuning/logs-to-elasticsearch
With that, Bro will now write most of its logs into ElasticSearch in addition
to maintaining the ASCII logs as it does by default. That script has
some tunable options for choosing which logs to send to ElasticSearch; refer
to the autogenerated script documentation for those options.
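As an illustration, a minimal sketch for ``local.bro`` that points the
writer at a remote server (the address below is hypothetical; the option
names come from the ElasticSearch writer script):

.. code:: bro

    @load tuning/logs-to-elasticsearch

    # Hypothetical server address; the defaults are 127.0.0.1 and 9200.
    redef LogElasticSearch::server_host = "10.0.0.5";
    redef LogElasticSearch::server_port = 9200;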
There is an interface named Brownian being written specifically to
integrate with the data that Bro outputs into ElasticSearch. It can be
found here::

    https://github.com/grigorescu/Brownian
Tuning
------
A common problem encountered with ElasticSearch is too many files being held
open. The ElasticSearch website has some suggestions on how to increase the
open file limit.
- http://www.elasticsearch.org/tutorials/2011/04/06/too-many-open-files.html
TODO
----
Lots.
- Perform multicast discovery for server.
- Better error detection.
- Better defaults (don't index loaded-plugins, for instance).


@ -373,3 +373,14 @@ record, care must be given to when and how long data is stored.
Normally data saved to the connection record will remain there for the
duration of the connection and from a practical perspective it's not
uncommon to need to delete that data before the end of the connection.
Other Writers
-------------
Bro supports the following output formats other than ASCII:
.. toctree::
    :maxdepth: 1

    logging-dataseries
    logging-elasticsearch


@ -1,5 +1,6 @@
.. _CMake: http://www.cmake.org
.. _SWIG: http://www.swig.org
.. _Xcode: https://developer.apple.com/xcode/
.. _MacPorts: http://www.macports.org
.. _Fink: http://www.finkproject.org
.. _Homebrew: http://mxcl.github.com/homebrew
@ -85,17 +86,20 @@ The following dependencies are required to build Bro:
* Mac OS X

  Compiling source code on Macs requires first downloading Xcode_,
  then going through its "Preferences..." -> "Downloads" menus to
  install the "Command Line Tools" component.

  Lion (10.7) and Mountain Lion (10.8) come with all required
  dependencies except for CMake_, SWIG_, and ``libmagic``.

  Distributions of these dependencies can be obtained from the project
  websites linked above, but they're also likely available from your
  preferred Mac OS X package management system (e.g. MacPorts_, Fink_,
  or Homebrew_).

  Specifically for MacPorts, the ``swig``, ``swig-ruby``, ``swig-python``
  and ``file`` packages provide the required dependencies.

Optional Dependencies
~~~~~~~~~~~~~~~~~~~~~


@ -19,6 +19,7 @@ rest_target(${psd} base/init-bare.bro internal)
rest_target(${CMAKE_BINARY_DIR}/src base/bro.bif.bro)
rest_target(${CMAKE_BINARY_DIR}/src base/const.bif.bro)
rest_target(${CMAKE_BINARY_DIR}/src base/event.bif.bro)
rest_target(${CMAKE_BINARY_DIR}/src base/input.bif.bro)
rest_target(${CMAKE_BINARY_DIR}/src base/logging.bif.bro)
rest_target(${CMAKE_BINARY_DIR}/src base/reporter.bif.bro)
rest_target(${CMAKE_BINARY_DIR}/src base/strings.bif.bro)
@ -31,11 +32,18 @@ rest_target(${psd} base/frameworks/cluster/setup-connections.bro)
rest_target(${psd} base/frameworks/communication/main.bro)
rest_target(${psd} base/frameworks/control/main.bro)
rest_target(${psd} base/frameworks/dpd/main.bro)
rest_target(${psd} base/frameworks/input/main.bro)
rest_target(${psd} base/frameworks/input/readers/ascii.bro)
rest_target(${psd} base/frameworks/input/readers/benchmark.bro)
rest_target(${psd} base/frameworks/input/readers/raw.bro)
rest_target(${psd} base/frameworks/intel/main.bro)
rest_target(${psd} base/frameworks/logging/main.bro)
rest_target(${psd} base/frameworks/logging/postprocessors/scp.bro)
rest_target(${psd} base/frameworks/logging/postprocessors/sftp.bro)
rest_target(${psd} base/frameworks/logging/writers/ascii.bro)
rest_target(${psd} base/frameworks/logging/writers/dataseries.bro)
rest_target(${psd} base/frameworks/logging/writers/elasticsearch.bro)
rest_target(${psd} base/frameworks/logging/writers/none.bro)
rest_target(${psd} base/frameworks/metrics/cluster.bro)
rest_target(${psd} base/frameworks/metrics/main.bro)
rest_target(${psd} base/frameworks/metrics/non-cluster.bro)
@ -53,6 +61,7 @@ rest_target(${psd} base/frameworks/packet-filter/netstats.bro)
rest_target(${psd} base/frameworks/reporter/main.bro)
rest_target(${psd} base/frameworks/signatures/main.bro)
rest_target(${psd} base/frameworks/software/main.bro)
rest_target(${psd} base/frameworks/tunnels/main.bro)
rest_target(${psd} base/protocols/conn/contents.bro)
rest_target(${psd} base/protocols/conn/inactivity.bro)
rest_target(${psd} base/protocols/conn/main.bro)
@ -71,6 +80,8 @@ rest_target(${psd} base/protocols/irc/main.bro)
rest_target(${psd} base/protocols/smtp/entities-excerpt.bro)
rest_target(${psd} base/protocols/smtp/entities.bro)
rest_target(${psd} base/protocols/smtp/main.bro)
rest_target(${psd} base/protocols/socks/consts.bro)
rest_target(${psd} base/protocols/socks/main.bro)
rest_target(${psd} base/protocols/ssh/main.bro)
rest_target(${psd} base/protocols/ssl/consts.bro)
rest_target(${psd} base/protocols/ssl/main.bro)
@ -135,6 +146,7 @@ rest_target(${psd} policy/protocols/ssl/known-certs.bro)
rest_target(${psd} policy/protocols/ssl/validate-certs.bro)
rest_target(${psd} policy/tuning/defaults/packet-fragments.bro)
rest_target(${psd} policy/tuning/defaults/warnings.bro)
rest_target(${psd} policy/tuning/logs-to-elasticsearch.bro)
rest_target(${psd} policy/tuning/track-all-assets.bro)
rest_target(${psd} site/local-manager.bro)
rest_target(${psd} site/local-proxy.bro)


@ -55,8 +55,8 @@ The Bro scripting language supports the following built-in types.
A temporal type representing a relative time. An ``interval``
constant can be written as a numeric constant followed by a time
unit where the time unit is one of ``usec``, ``msec``, ``sec``, ``min``,
``hr``, or ``day`` which respectively represent microseconds, milliseconds,
seconds, minutes, hours, and days. Whitespace between the numeric
constant and time unit is optional. Appending the letter "s" to the
time unit in order to pluralize it is also optional (to no semantic
@ -95,14 +95,14 @@ The Bro scripting language supports the following built-in types.
and embedded.

In exact matching the ``==`` equality relational operator is used
with one :bro:type:`pattern` operand and one :bro:type:`string`
operand (order of operands does not matter) to check whether the full
string exactly matches the pattern. In exact matching, the ``^``
beginning-of-line and ``$`` end-of-line anchors are redundant since
the pattern is implicitly anchored to the beginning and end of the
line to facilitate an exact match. For example::

    /foo|bar/ == "foo"

yields true, while::
@ -110,9 +110,9 @@ The Bro scripting language supports the following built-in types.
yields false. The ``!=`` operator would yield the negation of ``==``.

In embedded matching the ``in`` operator is used with one
:bro:type:`pattern` operand (which must be on the left-hand side) and
one :bro:type:`string` operand, but tests whether the pattern
appears anywhere within the given string. For example::

    /foo|bar/ in "foobar"
@ -162,7 +162,11 @@ The Bro scripting language supports the following built-in types.
``A1.A2.A3.A4``, where Ai all lie between 0 and 255.

IPv6 address constants are written as colon-separated hexadecimal form
as described by :rfc:`2373`, but additionally encased in square brackets.
The mixed notation with embedded IPv4 addresses as dotted-quads in the
lower 32 bits is also allowed.
Some examples: ``[2001:db8::1]``, ``[::ffff:192.168.1.100]``, or
``[aaaa:bbbb:cccc:dddd:eeee:ffff:1111:2222]``.

Hostname constants can also be used, but since a hostname can
correspond to multiple IP addresses, the type of such variable is a
@ -196,7 +200,7 @@ The Bro scripting language supports the following built-in types.
A type representing a block of IP addresses in CIDR notation. A
``subnet`` constant is written as an :bro:type:`addr` followed by a
slash (/) and then the network prefix size specified as a decimal
number. For example, ``192.168.0.0/16`` or ``[fe80::]/64``.

.. bro:type:: any
@ -546,7 +550,12 @@ scripting language supports the following built-in attributes.
.. bro:attr:: &expire_func

    Called right before a container element expires. The function's
    first parameter is of the same type as the container and the second
    parameter the same type as the container's index. The return
    value is an :bro:type:`interval` indicating the amount of additional
    time to wait before expiring the container element at the given
    index (which will trigger another execution of this function).
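    As an illustration, a minimal sketch of an expiration handler for a
    table indexed by address (the names here are hypothetical):

    .. code:: bro

        function expired(t: table[addr] of count, idx: addr): interval
            {
            print fmt("expiring entry for %s", idx);
            return 0secs;  # Return a positive interval to postpone expiry.
            }

        global seen: table[addr] of count &create_expire=5min &expire_func=expired;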
.. bro:attr:: &read_expire
@ -591,10 +600,6 @@ scripting language supports the following built-in attributes.
.. TODO: needs to be documented.

.. bro:attr:: &raw_output

    Opens a file in raw mode, i.e., non-ASCII characters are not


@ -51,13 +51,18 @@ This script contains a default event handler that raises
:bro:enum:`Signatures::Sensitive_Signature` :doc:`Notices <notice>`
(as well as others; see the beginning of the script).

As signatures are independent of Bro's policy scripts, they are put into
their own file(s). There are three ways to specify which files contain
signatures: By using the ``-s`` flag when you invoke Bro, or by
extending the Bro variable :bro:id:`signature_files` using the ``+=``
operator, or by using the ``@load-sigs`` directive inside a Bro script.
If a signature file is given without a full path, it is searched for
along the normal ``BROPATH``. Additionally, the ``@load-sigs``
directive can be used to load signature files in a path relative to the
Bro script in which it's placed, e.g. ``@load-sigs ./mysigs.sig`` will
expect that signature file in the same directory as the Bro script. The
default extension of the file name is ``.sig``, and Bro appends that
automatically when necessary.

Signature language
==================
@ -224,20 +229,10 @@ matched. The following context conditions are defined:
confirming the match. If false is returned, no signature match is
going to be triggered. The function has to be of type ``function
cond(state: signature_state, data: string): bool``. Here,
``data`` may contain the most recent content chunk available at
the time the signature was matched. If no such chunk is available,
``data`` will be the empty string. See :bro:type:`signature_state`
for its definition.
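As an illustration, a hedged sketch of a signature using an ``eval``
condition (the signature and function names are hypothetical)::

    signature my-http-get {
        ip-proto == tcp
        payload /^GET /
        eval validate_get
    }

with the condition function defined in a Bro script:

.. code:: bro

    function validate_get(state: signature_state, data: string): bool
        {
        # Only confirm the match if some payload was available.
        return |data| > 0;
        }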
``payload-size <cmp> <integer>``
    Compares the integer to the size of the payload of a packet. For


@ -3,7 +3,13 @@
# This script creates binary packages for Mac OS X.
# They can be found in ../build/ after running.

cmake -P /dev/stdin << "EOF"
if ( ${CMAKE_VERSION} VERSION_LESS 2.8.9 )
    message(FATAL_ERROR "CMake >= 2.8.9 required to build package")
endif ()
EOF
[ $? -ne 0 ] && exit 1;
type sw_vers > /dev/null 2>&1 || {
    echo "Unable to get Mac OS X version" >&2;
@ -34,26 +40,26 @@ prefix=/opt/bro
cd ..

# Minimum Bro
CMAKE_PREFIX_PATH=/usr CMAKE_OSX_ARCHITECTURES=${arch} ./configure --prefix=${prefix} \
    --disable-broccoli --disable-broctl --pkg-name-prefix=Bro-minimal \
    --binary-package
( cd build && make package )

# Full Bro package
CMAKE_PREFIX_PATH=/usr CMAKE_OSX_ARCHITECTURES=${arch} ./configure --prefix=${prefix} \
    --pkg-name-prefix=Bro --binary-package
( cd build && make package )

# Broccoli
cd aux/broccoli
CMAKE_PREFIX_PATH=/usr CMAKE_OSX_ARCHITECTURES=${arch} ./configure --prefix=${prefix} \
    --binary-package
( cd build && make package && mv *.dmg ../../../build/ )
cd ../..

# Broctl
cd aux/broctl
CMAKE_PREFIX_PATH=/usr CMAKE_OSX_ARCHITECTURES=${arch} ./configure --prefix=${prefix} \
    --binary-package
( cd build && make package && mv *.dmg ../../../build/ )
cd ../..


@ -77,6 +77,9 @@ export {
node_type: NodeType;
## The IP address of the cluster node.
ip: addr;
## If the *ip* field is a non-global IPv6 address, this field
## can specify a particular :rfc:`4007` ``zone_id``.
zone_id: string &default="";
## The port to which this local node can connect when
## establishing communication.
p: port;


@ -19,23 +19,26 @@ event bro_init() &priority=9
# Connections from the control node for runtime control and update events.
# Every node in a cluster is eligible for control from this host.
if ( n$node_type == CONTROL )
    Communication::nodes["control"] = [$host=n$ip, $zone_id=n$zone_id,
                                       $connect=F, $class="control",
                                       $events=control_events];

if ( me$node_type == MANAGER )
    {
    if ( n$node_type == WORKER && n$manager == node )
        Communication::nodes[i] =
            [$host=n$ip, $zone_id=n$zone_id, $connect=F,
             $class=i, $events=worker2manager_events, $request_logs=T];

    if ( n$node_type == PROXY && n$manager == node )
        Communication::nodes[i] =
            [$host=n$ip, $zone_id=n$zone_id, $connect=F,
             $class=i, $events=proxy2manager_events, $request_logs=T];

    if ( n$node_type == TIME_MACHINE && me?$time_machine && me$time_machine == i )
        Communication::nodes["time-machine"] = [$host=nodes[i]$ip,
                                                $zone_id=nodes[i]$zone_id,
                                                $p=nodes[i]$p,
                                                $connect=T, $retry=1min,
                                                $events=tm2manager_events];
    }
@ -44,7 +47,8 @@ event bro_init() &priority=9
{
if ( n$node_type == WORKER && n$proxy == node )
    Communication::nodes[i] =
        [$host=n$ip, $zone_id=n$zone_id, $connect=F, $class=i,
         $sync=T, $auth=T, $events=worker2proxy_events];

# accepts connections from the previous one.
# (This is not ideal for setups with many proxies)
@ -53,16 +57,18 @@ event bro_init() &priority=9
{
if ( n?$proxy )
    Communication::nodes[i]
        = [$host=n$ip, $zone_id=n$zone_id, $p=n$p,
           $connect=T, $auth=F, $sync=T, $retry=1mins];
else if ( me?$proxy && me$proxy == i )
    Communication::nodes[me$proxy]
        = [$host=nodes[i]$ip, $zone_id=nodes[i]$zone_id,
           $connect=F, $auth=T, $sync=T];
}

# Finally the manager, to send it status updates.
if ( n$node_type == MANAGER && me$manager == i )
    Communication::nodes["manager"] = [$host=nodes[i]$ip,
                                       $zone_id=nodes[i]$zone_id,
                                       $p=nodes[i]$p,
                                       $connect=T, $retry=1mins,
                                       $class=node,
@ -72,6 +78,7 @@ event bro_init() &priority=9
{
if ( n$node_type == MANAGER && me$manager == i )
    Communication::nodes["manager"] = [$host=nodes[i]$ip,
                                       $zone_id=nodes[i]$zone_id,
                                       $p=nodes[i]$p,
                                       $connect=T, $retry=1mins,
                                       $class=node,
@ -79,6 +86,7 @@ event bro_init() &priority=9
if ( n$node_type == PROXY && me$proxy == i )
    Communication::nodes["proxy"] = [$host=nodes[i]$ip,
                                     $zone_id=nodes[i]$zone_id,
                                     $p=nodes[i]$p,
                                     $connect=T, $retry=1mins,
                                     $sync=T, $class=node,
@ -87,6 +95,7 @@ event bro_init() &priority=9
if ( n$node_type == TIME_MACHINE &&
     me?$time_machine && me$time_machine == i )
    Communication::nodes["time-machine"] = [$host=nodes[i]$ip,
                                            $zone_id=nodes[i]$zone_id,
                                            $p=nodes[i]$p,
                                            $connect=T,
                                            $retry=1min,


@ -2,6 +2,7 @@
##! and/or transfer events.

@load base/frameworks/packet-filter
@load base/utils/addrs

module Communication;
@ -9,17 +10,31 @@ export {
## The communication logging stream identifier.
redef enum Log::ID += { LOG };

## Which interface to listen on. The addresses ``0.0.0.0`` and ``[::]``
## are wildcards.
const listen_interface = 0.0.0.0 &redef;

## Which port to listen on.
const listen_port = 47757/tcp &redef;

## This defines if a listening socket should use SSL.
const listen_ssl = F &redef;

## Defines if a listening socket can bind to IPv6 addresses.
const listen_ipv6 = F &redef;

## If :bro:id:`Communication::listen_interface` is a non-global
## IPv6 address and requires a specific :rfc:`4007` ``zone_id``,
## it can be specified here.
const listen_ipv6_zone_id = "" &redef;

## Defines the interval at which to retry binding to
## :bro:id:`Communication::listen_interface` on
## :bro:id:`Communication::listen_port` if it's already in use.
const listen_retry = 30 secs &redef;

## Default compression level. Compression level is 0-9, with 0 = no
## compression.
global compression_level = 0 &redef;
@ -27,7 +42,7 @@ export {
type Info: record {
    ## The network time at which a communication event occurred.
    ts: time &log;
    ## The peer name (if any) with which a communication event is concerned.
    peer: string &log &optional;
    ## Where the communication event message originated from, that is,
    ## either from the scripting layer or inside the Bro process.
@ -51,7 +66,11 @@ export {
type Node: record {
    ## Remote address.
    host: addr;

    ## If the *host* field is a non-global IPv6 address, this field
    ## can specify a particular :rfc:`4007` ``zone_id``.
    zone_id: string &optional;

    ## Port of the remote Bro communication endpoint if we are initiating
    ## the connection based on the :bro:id:`connect` field.
    p: port &optional;
@ -101,7 +120,7 @@ export {
    ## The remote peer.
    peer: event_peer &optional;

    ## Indicates the status of the node.
    connected: bool &default = F;
};
@ -144,7 +163,7 @@ event bro_init() &priority=5
function do_script_log_common(level: count, src: count, msg: string)
    {
    Log::write(Communication::LOG, [$ts = network_time(),
                                    $level = (level == REMOTE_LOG_INFO ? "info" : "error"),
                                    $src_name = src_names[src],
                                    $peer = get_event_peer()$descr,
@ -160,7 +179,7 @@ event remote_log(level: count, src: count, msg: string)
# This is a core generated event.
event remote_log_peer(p: event_peer, level: count, src: count, msg: string)
    {
    local rmsg = fmt("[#%d/%s:%d] %s", p$id, addr_to_uri(p$host), p$p, msg);
    do_script_log_common(level, src, rmsg);
    }
@ -178,10 +197,11 @@ function connect_peer(peer: string)
    p = node$p;

local class = node?$class ? node$class : "";

local zone_id = node?$zone_id ? node$zone_id : "";
local id = connect(node$host, zone_id, p, class, node$retry, node$ssl);

if ( id == PEER_ID_NONE )
    Log::write(Communication::LOG, [$ts = network_time(),
                                    $peer = get_event_peer()$descr,
                                    $message = "can't trigger connect"]);
pending_peers[id] = node;
@ -320,7 +340,7 @@ event bro_init() &priority = -10 # let others modify nodes
{
if ( |nodes| > 0 )
    enable_communication();

for ( tag in nodes )
    {
    if ( ! nodes[tag]$connect )


@ -11,6 +11,10 @@ export {
## The port of the host that will be controlled.
const host_port = 0/tcp &redef;

## If :bro:id:`Control::host` is a non-global IPv6 address and
## requires a specific :rfc:`4007` ``zone_id``, it can be set here.
const zone_id = "" &redef;

## The command that is being done. It's typically set on the
## command line.
const cmd = "" &redef;


@ -149,3 +149,64 @@ signature dpd_ssl_client {
payload /^(\x16\x03[\x00\x01\x02]..\x01...\x03[\x00\x01\x02]|...?\x01[\x00\x01\x02][\x02\x03]).*/ payload /^(\x16\x03[\x00\x01\x02]..\x01...\x03[\x00\x01\x02]|...?\x01[\x00\x01\x02][\x02\x03]).*/
tcp-state originator
}
signature dpd_ayiya {
ip-proto = udp
payload /^..\x11\x29/
enable "ayiya"
}
signature dpd_teredo {
ip-proto = udp
payload /^(\x00\x00)|(\x00\x01)|([\x60-\x6f])/
enable "teredo"
}
signature dpd_socks4_client {
ip-proto == tcp
# '32' is a rather arbitrary max length for the user name.
payload /^\x04[\x01\x02].{0,32}\x00/
tcp-state originator
}
signature dpd_socks4_server {
ip-proto == tcp
requires-reverse-signature dpd_socks4_client
payload /^\x00[\x5a\x5b\x5c\x5d]/
tcp-state responder
enable "socks"
}
signature dpd_socks4_reverse_client {
ip-proto == tcp
# '32' is a rather arbitrary max length for the user name.
payload /^\x04[\x01\x02].{0,32}\x00/
tcp-state responder
}
signature dpd_socks4_reverse_server {
ip-proto == tcp
requires-reverse-signature dpd_socks4_reverse_client
payload /^\x00[\x5a\x5b\x5c\x5d]/
tcp-state originator
enable "socks"
}
signature dpd_socks5_client {
ip-proto == tcp
# Watch for a few authentication methods to reduce false positives.
payload /^\x05.[\x00\x01\x02]/
tcp-state originator
}
signature dpd_socks5_server {
ip-proto == tcp
requires-reverse-signature dpd_socks5_client
# Watch for a single authentication method to be chosen by the server or
# the server to indicate that no authentication is required.
payload /^\x05(\x00|\x01[\x00\x01\x02])/
tcp-state responder
enable "socks"
}


@ -3,8 +3,7 @@
module DPD;

@load-sigs ./dpd.sig

export {
    ## Add the DPD logging stream identifier.
@ -105,5 +104,8 @@ event protocol_violation(c: connection, atype: count, aid: count,
                        reason: string) &priority=-5
    {
    if ( c?$dpd )
        {
        Log::write(DPD::LOG, c$dpd);
        delete c$dpd;
        }
    }


@ -0,0 +1,5 @@
@load ./main
@load ./readers/ascii
@load ./readers/raw
@load ./readers/benchmark


@ -0,0 +1,156 @@
##! The input framework provides a way to read previously stored data either
##! as an event stream or into a Bro table.
module Input;
export {
## The default input reader used. Defaults to `READER_ASCII`.
const default_reader = READER_ASCII &redef;
## The default reader mode used. Defaults to `MANUAL`.
const default_mode = MANUAL &redef;
## Flag that controls if the input framework accepts records
## that contain types that are not supported (at the moment
## file and function). If true, the input framework will
## warn in these cases, but continue. If false, it will
## abort. Defaults to false (abort)
const accept_unsupported_types = F &redef;
## TableFilter description type used for the `table` method.
type TableDescription: record {
## Common definitions for tables and events
## String that allows the reader to find the source.
## For `READER_ASCII`, this is the filename.
source: string;
## Reader to use for this stream
reader: Reader &default=default_reader;
## Read mode to use for this stream
mode: Mode &default=default_mode;
## Descriptive name. Used to remove a stream at a later time
name: string;
# Special definitions for tables
## Table which will receive the data read by the input framework
destination: any;
## Record that defines the values used as the index of the table
idx: any;
## Record that defines the values used as the elements of the table
## If val is undefined, destination has to be a set.
val: any &optional;
## Defines if the value of the table is a record (default), or a single value. Val
## can only contain one element when this is set to false.
want_record: bool &default=T;
## The event that is raised each time a value is added to, changed in or removed
## from the table. The event will receive an Input::Event enum as the first
## argument, the idx record as the second argument and the value (record) as the
## third argument.
ev: any &optional; # event containing idx, val as values.
## Predicate function that can decide if an insertion, update or removal should
## really be executed. Parameters are the same as for the event. If true is
## returned, the update is performed. If false is returned, it is skipped.
pred: function(typ: Input::Event, left: any, right: any): bool &optional;
## A key/value table that will be passed to the reader.
## Interpretation of the values is left to the reader, but
## usually they will be used for configuration purposes.
config: table[string] of string &default=table();
};
## EventFilter description type used for the `event` method.
type EventDescription: record {
## Common definitions for tables and events
## String that allows the reader to find the source.
## For `READER_ASCII`, this is the filename.
source: string;
## Reader to use for this stream
reader: Reader &default=default_reader;
## Read mode to use for this stream
mode: Mode &default=default_mode;
## Descriptive name. Used to remove a stream at a later time
name: string;
# Special definitions for events
## Record describing the fields to be retrieved from the source input.
fields: any;
## If want_record is false, the event receives each value in fields as a separate argument.
## If it is set to true (default), the event receives all fields in a single record value.
want_record: bool &default=T;
## The event that is raised each time a new line is received from the reader.
## The event will receive an Input::Event enum as the first element, and the fields as the following arguments.
ev: any;
## A key/value table that will be passed to the reader.
## Interpretation of the values is left to the reader, but
## usually they will be used for configuration purposes.
config: table[string] of string &default=table();
};
## Create a new table input from a given source. Returns true on success.
##
## description: `TableDescription` record describing the source.
global add_table: function(description: Input::TableDescription) : bool;
## Create a new event input from a given source. Returns true on success.
##
## description: `EventDescription` record describing the source.
global add_event: function(description: Input::EventDescription) : bool;
## Remove an input stream. Returns true on success and false if the named stream was not found.
##
## id: string value identifying the stream to be removed
global remove: function(id: string) : bool;
## Forces the current input to be checked for changes.
## Returns true on success and false if the named stream was not found
##
## id: string value identifying the stream
global force_update: function(id: string) : bool;
## Event that is called when the update of a specific source is finished.
global update_finished: event(name: string, source: string);
}
@load base/input.bif
module Input;
function add_table(description: Input::TableDescription) : bool
{
return __create_table_stream(description);
}
function add_event(description: Input::EventDescription) : bool
{
return __create_event_stream(description);
}
function remove(id: string) : bool
{
return __remove_stream(id);
}
function force_update(id: string) : bool
{
return __force_update(id);
}
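# Usage sketch (illustrative only; not part of this file): reading a
# tab-separated input file into a table. The file name and the record
# fields below are hypothetical.
#
#   type Idx: record {
#       ip: addr;
#   };
#
#   type Val: record {
#       reason: string;
#   };
#
#   global blacklist: table[addr] of Val = table();
#
#   event bro_init()
#       {
#       Input::add_table([$source="blacklist.data", $name="blacklist",
#                         $idx=Idx, $val=Val, $destination=blacklist]);
#       }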


@ -0,0 +1,21 @@
##! Interface for the ascii input reader.
##!
##! The defaults are set to match Bro's ASCII output.
module InputAscii;
export {
## Separator between fields.
## Please note that the separator has to be exactly one character long
const separator = "\t" &redef;
## Separator between set elements.
## Please note that the separator has to be exactly one character long
const set_separator = "," &redef;
## String to use for empty fields.
const empty_field = "(empty)" &redef;
## String to use for an unset &optional field.
const unset_field = "-" &redef;
}


@ -0,0 +1,23 @@
##! Interface for the benchmark input reader.
module InputBenchmark;
export {
## multiplication factor for each second
const factor = 1.0 &redef;
## spread factor between lines
const spread = 0 &redef;
## spreading where usleep = 1000000 / autospread * num_lines
const autospread = 0.0 &redef;
## addition factor for each heartbeat
const addfactor = 0 &redef;
## stop spreading at x lines per heartbeat
const stopspreadat = 0 &redef;
## 1 -> enable timed spreading
const timedspread = 0.0 &redef;
}


@ -0,0 +1,9 @@
##! Interface for the raw input reader.
module InputRaw;
export {
## Separator between input records.
## Please note that the separator has to be exactly one character long
const record_separator = "\n" &redef;
}


@ -1,3 +1,6 @@
@load ./main
@load ./postprocessors
@load ./writers/ascii
@load ./writers/dataseries
@load ./writers/elasticsearch
@load ./writers/none


@ -96,6 +96,12 @@ export {
## file name. Generally, filenames are expected to be given
## without any extensions; writers will add appropriate
## extensions automatically.
##
## If this path is found to conflict with another filter's
## for the same writer type, it is automatically corrected
## by appending "-N", where N is the smallest integer greater
## or equal to 2 that allows the corrected path name to not
## conflict with another filter's.
path: string &optional;
## A function returning the output path for recording entries
@ -115,7 +121,10 @@ export {
## rec: An instance of the stream's ``columns`` type with its
## fields set to the values to be logged.
##
## Returns: The path to be used for the filter, which will be subject
##          to the same automatic correction rules as the *path*
##          field of :bro:type:`Log::Filter` in the case of conflicts
##          with other filters trying to use the same writer/path pair.
path_func: function(id: ID, path: string, rec: any): string &optional;
## Subset of column names to record. If not given, all
@ -138,6 +147,11 @@ export {
## Callback function to trigger for rotated files. If not set, the
## default comes out of :bro:id:`Log::default_rotation_postprocessors`.
postprocessor: function(info: RotationInfo) : bool &optional;

## A key/value table that will be passed on to the writer.
## Interpretation of the values is left to the writer, but
## usually they will be used for configuration purposes.
config: table[string] of string &default=table();
};
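# Illustrative sketch (not part of this file) of a custom path_func
# matching the documented signature; it appends the current day to the
# suggested path, so each day's entries land in their own file:
#
#   function split_by_day(id: Log::ID, path: string, rec: any): string
#       {
#       return fmt("%s-%s", path, strftime("%Y-%m-%d", network_time()));
#       }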
## Sentinel value for indicating that a filter was not found when looked up.
@ -313,6 +327,11 @@ export {
## Log::default_rotation_postprocessor_cmd
## Log::default_rotation_postprocessors
global run_rotation_postprocessor_cmd: function(info: RotationInfo, npath: string) : bool;

## The streams which are currently active and not disabled.
## This table is not meant to be modified by users! Only use it for
## examining which streams are active.
global active_streams: table[ID] of Stream = table();
}
# We keep a script-level copy of all filters so that we can manipulate them.
@ -327,20 +346,23 @@ function __default_rotation_postprocessor(info: RotationInfo) : bool
{
if ( info$writer in default_rotation_postprocessors )
    return default_rotation_postprocessors[info$writer](info);
else
    # Return T by default so that postprocessor-less writers don't shutdown.
    return T;
}
function default_path_func(id: ID, path: string, rec: any) : string
    {
    # The suggested path value is a previous result of this function
    # or a filter path explicitly set by the user, so continue using it.
    if ( path != "" )
        return path;

    local id_str = fmt("%s", id);

    local parts = split1(id_str, /::/);
    if ( |parts| == 2 )
        {
        # Example: Notice::LOG -> "notice"
        if ( parts[2] == "LOG" )
            {
@ -356,11 +378,11 @@ function default_path_func(id: ID, path: string, rec: any) : string
            output = cat(output, sub_bytes(module_parts[4],1,1), "_", sub_bytes(module_parts[4], 2, |module_parts[4]|));
            return to_lower(output);
            }

        # Example: Notice::POLICY_LOG -> "notice_policy"
        if ( /_LOG$/ in parts[2] )
            parts[2] = sub(parts[2], /_LOG$/, "");

        return cat(to_lower(parts[1]),"_",to_lower(parts[2]));
        }
    else
@ -376,13 +398,16 @@ function run_rotation_postprocessor_cmd(info: RotationInfo, npath: string) : boo
if ( pp_cmd == "" ) if ( pp_cmd == "" )
return T; return T;
# Turn, e.g., Log::WRITER_ASCII into "ascii".
local writer = subst_string(to_lower(fmt("%s", info$writer)), "log::writer_", "");
# The date format is hard-coded here to provide a standardized # The date format is hard-coded here to provide a standardized
# script interface. # script interface.
system(fmt("%s %s %s %s %s %d", system(fmt("%s %s %s %s %s %d %s",
pp_cmd, npath, info$path, pp_cmd, npath, info$path,
strftime("%y-%m-%d_%H.%M.%S", info$open), strftime("%y-%m-%d_%H.%M.%S", info$open),
strftime("%y-%m-%d_%H.%M.%S", info$close), strftime("%y-%m-%d_%H.%M.%S", info$close),
info$terminating)); info$terminating, writer));
return T; return T;
} }
@ -392,11 +417,15 @@ function create_stream(id: ID, stream: Stream) : bool
    if ( ! __create_stream(id, stream) )
        return F;

    active_streams[id] = stream;

    return add_default_filter(id);
    }

function disable_stream(id: ID) : bool
    {
    delete active_streams[id];

    return __disable_stream(id);
    }
@ -407,7 +436,7 @@ function add_filter(id: ID, filter: Filter) : bool
    # definition.
    if ( ! filter?$path_func )
        filter$path_func = default_path_func;

    filters[id, filter$name] = filter;
    return __add_filter(id, filter);
    }


@ -8,12 +8,13 @@ export {
## into files. This is primarily for debugging purposes.
const output_to_stdout = F &redef;

## If true, include lines with log meta information such as column names with
## types, the values of ASCII logging options that are in use, and the time when
## the file was opened and closed (the latter at the end).
const include_meta = T &redef;

## Prefix for lines with meta information.
const meta_prefix = "#" &redef;

## Separator between fields.
const separator = "\t" &redef;
View file

@ -0,0 +1,60 @@
##! Interface for the DataSeries log writer.
module LogDataSeries;
export {
## Compression to use with the DS output file. Options are:
##
## 'none' -- No compression.
## 'lzf' -- LZF compression. Very quick, but leads to larger output files.
## 'lzo' -- LZO compression. Very fast decompression times.
## 'gz' -- GZIP compression. Slower than LZF, but also produces smaller output.
## 'bz2' -- BZIP2 compression. Slower than GZIP, but also produces smaller output.
const compression = "gz" &redef;
## The extent buffer size.
## Larger values here lead to better compression and more efficient writes, but
## also increase the lag between the time events are received and the time they
## are actually written to disk.
const extent_size = 65536 &redef;
## Should we dump the XML schema we use for this DS file to disk?
## If yes, the XML schema shares the name of the logfile, but has
## an XML ending.
const dump_schema = F &redef;
## How many threads should DataSeries spawn to perform compression?
## Note that this dictates the number of threads per log stream. If
## you're using a lot of streams, you may want to keep this number
## relatively small.
##
## Default value is 1, which will spawn one thread / stream.
##
## Maximum is 128, minimum is 1.
const num_threads = 1 &redef;
## Should time be stored as an integer or a double?
## Storing time as a double leads to possible precision issues and
## can (significantly) increase the size of the resulting DS log.
## That said, timestamps stored in double form are consistent
## with the rest of Bro, including the standard ASCII log. Hence, we
## use them by default.
const use_integer_for_time = F &redef;
}
# Default function to postprocess a rotated DataSeries log file. It moves the
# rotated file to a new name that includes a timestamp with the opening time, and
# then runs the writer's default postprocessor command on it.
function default_rotation_postprocessor_func(info: Log::RotationInfo) : bool
{
# Move file to name including both opening and closing time.
local dst = fmt("%s.%s.ds", info$path,
strftime(Log::default_rotation_date_format, info$open));
system(fmt("/bin/mv %s %s", info$fname, dst));
# Run default postprocessor.
return Log::run_rotation_postprocessor_cmd(info, dst);
}
redef Log::default_rotation_postprocessors += { [Log::WRITER_DATASERIES] = default_rotation_postprocessor_func };


@ -0,0 +1,46 @@
##! Log writer for sending logs to an ElasticSearch server.
##!
##! Note: This module is in testing and is not yet considered stable!
##!
##! There is one known memory issue. If your elasticsearch server is
##! running slowly and taking too long to return from bulk insert
##! requests, the message queue to the writer thread will continue
##! growing larger and larger, giving the appearance of a memory leak.
module LogElasticSearch;
export {
## Name of the ES cluster
const cluster_name = "elasticsearch" &redef;
## ES Server
const server_host = "127.0.0.1" &redef;
## ES Port
const server_port = 9200 &redef;
## Name of the ES index
const index_prefix = "bro" &redef;
## The ES type prefix comes before the name of the related log.
## e.g. prefix = "bro\_" would create types of bro_dns, bro_software, etc.
const type_prefix = "" &redef;
## The time before an ElasticSearch transfer will timeout.
## This is not working!
const transfer_timeout = 2secs;
## The batch size is the number of messages that will be queued up before
## they are sent to be bulk indexed.
const max_batch_size = 1000 &redef;
## The maximum amount of wall-clock time that is allowed to pass without
## finishing a bulk log send. This represents the maximum delay you
## would like to have with your logs before they are sent to ElasticSearch.
const max_batch_interval = 1min &redef;
## The maximum byte size for a buffered JSON string to send to the bulk
## insert API.
const max_byte_size = 1024 * 1024 &redef;
}

View file

@ -0,0 +1,17 @@
##! Interface for the None log writer. This writer is mainly for debugging.
module LogNone;
export {
## If true, output debugging output that can be useful for unit
## testing the logging framework.
const debug = F &redef;
}
function default_rotation_postprocessor_func(info: Log::RotationInfo) : bool
{
return T;
}
redef Log::default_rotation_postprocessors += { [Log::WRITER_NONE] = default_rotation_postprocessor_func };


@ -23,7 +23,10 @@ redef Cluster::worker2manager_events += /Notice::cluster_notice/;
@if ( Cluster::local_node_type() != Cluster::MANAGER )
# The notice policy is completely handled by the manager and shouldn't be
# done by workers or proxies to save time for packet processing.
event bro_init() &priority=-11
    {
    Notice::policy = table();
    }

event Notice::begin_suppression(n: Notice::Info)
    {


@ -1,5 +1,5 @@
##! This framework is intended to create an output and filtering path for
##! internal messages/warnings/errors. It should typically be loaded to
##! avoid Bro spewing internal messages to standard error and instead log
##! them to a file in a standard way. Note that this framework deals with
##! the handling of internally-generated reporter messages, for the
@ -13,11 +13,11 @@ export {
redef enum Log::ID += { LOG };

## An indicator of reporter message severity.
type Level: enum {
    ## Informational, not needing specific attention.
    INFO,
    ## Warning of a potential problem.
    WARNING,
    ## A non-fatal error that should be addressed, but doesn't
    ## terminate program execution.
    ERROR
@ -36,24 +36,55 @@ export {
## Not all reporter messages will have locations in them though. ## Not all reporter messages will have locations in them though.
location: string &log &optional; location: string &log &optional;
}; };
## Tunable for sending reporter warning messages to STDERR. The option to
## turn it off is presented here in case Bro is being run by some
## external harness and shouldn't output anything to the console.
const warnings_to_stderr = T &redef;
## Tunable for sending reporter error messages to STDERR. The option to
## turn it off is presented here in case Bro is being run by some
## external harness and shouldn't output anything to the console.
const errors_to_stderr = T &redef;
} }
global stderr: file;
event bro_init() &priority=5 event bro_init() &priority=5
{ {
Log::create_stream(Reporter::LOG, [$columns=Info]); Log::create_stream(Reporter::LOG, [$columns=Info]);
if ( errors_to_stderr || warnings_to_stderr )
stderr = open("/dev/stderr");
} }
event reporter_info(t: time, msg: string, location: string) event reporter_info(t: time, msg: string, location: string) &priority=-5
{ {
Log::write(Reporter::LOG, [$ts=t, $level=INFO, $message=msg, $location=location]); Log::write(Reporter::LOG, [$ts=t, $level=INFO, $message=msg, $location=location]);
} }
event reporter_warning(t: time, msg: string, location: string) event reporter_warning(t: time, msg: string, location: string) &priority=-5
{ {
if ( warnings_to_stderr )
{
if ( t > double_to_time(0.0) )
print stderr, fmt("WARNING: %.6f %s (%s)", t, msg, location);
else
print stderr, fmt("WARNING: %s (%s)", msg, location);
}
Log::write(Reporter::LOG, [$ts=t, $level=WARNING, $message=msg, $location=location]); Log::write(Reporter::LOG, [$ts=t, $level=WARNING, $message=msg, $location=location]);
} }
event reporter_error(t: time, msg: string, location: string) event reporter_error(t: time, msg: string, location: string) &priority=-5
{ {
if ( errors_to_stderr )
{
if ( t > double_to_time(0.0) )
print stderr, fmt("ERROR: %.6f %s (%s)", t, msg, location);
else
print stderr, fmt("ERROR: %s (%s)", msg, location);
}
Log::write(Reporter::LOG, [$ts=t, $level=ERROR, $message=msg, $location=location]); Log::write(Reporter::LOG, [$ts=t, $level=ERROR, $message=msg, $location=location]);
} }
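For example, an external harness that consumes reporter.log directly could silence the new console mirroring entirely (a two-line sketch for local.bro):

    # Keep reporter messages in reporter.log only; nothing on stderr.
    redef Reporter::warnings_to_stderr = F;
    redef Reporter::errors_to_stderr = F;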

View file

@ -0,0 +1 @@
@load ./main

View file

@ -0,0 +1,149 @@
##! This script handles the tracking/logging of tunnels (e.g. Teredo,
##! AYIYA, or IP-in-IP such as 6to4 where "IP" is either IPv4 or IPv6).
##!
##! For any connection that occurs over a tunnel, information about its
##! encapsulating tunnels is also found in the *tunnel* field of
##! :bro:type:`connection`.
module Tunnel;
export {
## The tunnel logging stream identifier.
redef enum Log::ID += { LOG };
## Types of interesting activity that can occur with a tunnel.
type Action: enum {
## A new tunnel (encapsulating "connection") has been seen.
DISCOVER,
## A tunnel connection has closed.
CLOSE,
## No new connections over a tunnel happened in the amount of
## time indicated by :bro:see:`Tunnel::expiration_interval`.
EXPIRE,
};
## The record type which contains column fields of the tunnel log.
type Info: record {
## Time at which some tunnel activity occurred.
ts: time &log;
## The unique identifier for the tunnel, which may correspond
## to a :bro:type:`connection`'s *uid* field for non-IP-in-IP tunnels.
## This is optional because, for payload proxies like SOCKS, there may
## be numerous connections that are nevertheless treated as a single
## tunnel.
uid: string &log &optional;
## The tunnel "connection" 4-tuple of endpoint addresses/ports.
## For an IP tunnel, the ports will be 0.
id: conn_id &log;
## The type of tunnel.
tunnel_type: Tunnel::Type &log;
## The type of activity that occurred.
action: Action &log;
};
## Logs all tunnels in an encapsulation chain that aren't already in the
## :bro:id:`Tunnel::active` table with action :bro:see:`Tunnel::DISCOVER`,
## and adds them to the table.
global register_all: function(ecv: EncapsulatingConnVector);
## Logs a single tunnel "connection" with action
## :bro:see:`Tunnel::DISCOVER` if it isn't already in the
## :bro:id:`Tunnel::active` table, and adds it to the table.
global register: function(ec: EncapsulatingConn);
## Logs a single tunnel "connection" with action
## :bro:see:`Tunnel::EXPIRE` and removes it from the
## :bro:id:`Tunnel::active` table.
##
## t: A table of tunnels.
##
## idx: The index of the tunnel table corresponding to the tunnel to expire.
##
## Returns: 0secs, which when this function is used as an
## :bro:attr:`&expire_func`, indicates to remove the element at
## *idx* immediately.
global expire: function(t: table[conn_id] of Info, idx: conn_id): interval;
## Removes a single tunnel from the :bro:id:`Tunnel::active` table
## and logs the closing/expiration of the tunnel.
##
## tunnel: The tunnel which has closed or expired.
##
## action: The specific reason for the tunnel ending.
global close: function(tunnel: Info, action: Action);
## The amount of time a tunnel is not used in establishment of new
## connections before it is considered inactive/expired.
const expiration_interval = 1hrs &redef;
## Currently active tunnels. That is, tunnels for which new, encapsulated
## connections have been seen in the interval indicated by
## :bro:see:`Tunnel::expiration_interval`.
global active: table[conn_id] of Info = table() &read_expire=expiration_interval &expire_func=expire;
}
const ayiya_ports = { 5072/udp };
redef dpd_config += { [ANALYZER_AYIYA] = [$ports = ayiya_ports] };
const teredo_ports = { 3544/udp };
redef dpd_config += { [ANALYZER_TEREDO] = [$ports = teredo_ports] };
redef likely_server_ports += { ayiya_ports, teredo_ports };
event bro_init() &priority=5
{
Log::create_stream(Tunnel::LOG, [$columns=Info]);
}
function register_all(ecv: EncapsulatingConnVector)
{
for ( i in ecv )
register(ecv[i]);
}
function register(ec: EncapsulatingConn)
{
if ( ec$cid !in active )
{
local tunnel: Info;
tunnel$ts = network_time();
if ( ec?$uid )
tunnel$uid = ec$uid;
tunnel$id = ec$cid;
tunnel$action = DISCOVER;
tunnel$tunnel_type = ec$tunnel_type;
active[ec$cid] = tunnel;
Log::write(LOG, tunnel);
}
}
function close(tunnel: Info, action: Action)
{
tunnel$action = action;
tunnel$ts = network_time();
Log::write(LOG, tunnel);
delete active[tunnel$id];
}
function expire(t: table[conn_id] of Info, idx: conn_id): interval
{
close(t[idx], EXPIRE);
return 0secs;
}
event new_connection(c: connection) &priority=5
{
if ( c?$tunnel )
register_all(c$tunnel);
}
event tunnel_changed(c: connection, e: EncapsulatingConnVector) &priority=5
{
register_all(e);
}
event connection_state_remove(c: connection) &priority=-5
{
if ( c$id in active )
close(active[c$id], CLOSE);
}
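A short usage sketch: tighten the inactivity timeout and report connections that arrive over a known tunnel (the 10-minute value is illustrative):

    # Expire idle tunnels after 10 minutes instead of the default hour.
    redef Tunnel::expiration_interval = 10mins;

    event new_connection(c: connection)
        {
        # c$tunnel holds the encapsulation chain; its last element is the
        # innermost tunnel, which register_all() adds to Tunnel::active.
        if ( c?$tunnel )
            print fmt("%s arrived via %s tunnel", c$uid,
                      c$tunnel[|c$tunnel|-1]$tunnel_type);
        }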

File diff suppressed because it is too large Load diff

View file

@ -29,6 +29,7 @@
@load base/frameworks/metrics @load base/frameworks/metrics
@load base/frameworks/intel @load base/frameworks/intel
@load base/frameworks/reporter @load base/frameworks/reporter
@load base/frameworks/tunnels
@load base/protocols/conn @load base/protocols/conn
@load base/protocols/dns @load base/protocols/dns
@ -36,6 +37,7 @@
@load base/protocols/http @load base/protocols/http
@load base/protocols/irc @load base/protocols/irc
@load base/protocols/smtp @load base/protocols/smtp
@load base/protocols/socks
@load base/protocols/ssh @load base/protocols/ssh
@load base/protocols/ssl @load base/protocols/ssl
@load base/protocols/syslog @load base/protocols/syslog

View file

@ -17,7 +17,7 @@ export {
type Info: record { type Info: record {
## This is the time of the first packet. ## This is the time of the first packet.
ts: time &log; ts: time &log;
## A unique identifier of a connection. ## A unique identifier of the connection.
uid: string &log; uid: string &log;
## The connection's 4-tuple of endpoint addresses/ports. ## The connection's 4-tuple of endpoint addresses/ports.
id: conn_id &log; id: conn_id &log;
@ -30,7 +30,7 @@ export {
## tear-downs, this will not include the final ACK. ## tear-downs, this will not include the final ACK.
duration: interval &log &optional; duration: interval &log &optional;
## The number of payload bytes the originator sent. For TCP ## The number of payload bytes the originator sent. For TCP
## this is taken from sequence numbers and might be inaccurate ## this is taken from sequence numbers and might be inaccurate
## (e.g., due to large connections) ## (e.g., due to large connections)
orig_bytes: count &log &optional; orig_bytes: count &log &optional;
## The number of payload bytes the responder sent. See ``orig_bytes``. ## The number of payload bytes the responder sent. See ``orig_bytes``.
@ -54,21 +54,21 @@ export {
## OTH No SYN seen, just midstream traffic (a "partial connection" that was not later closed). ## OTH No SYN seen, just midstream traffic (a "partial connection" that was not later closed).
## ========== =============================================== ## ========== ===============================================
conn_state: string &log &optional; conn_state: string &log &optional;
## If the connection is originated locally, this value will be T. If ## If the connection is originated locally, this value will be T. If
## it was originated remotely it will be F. In the case that the ## it was originated remotely it will be F. In the case that the
## :bro:id:`Site::local_nets` variable is undefined, this field will ## :bro:id:`Site::local_nets` variable is undefined, this field will
## be left empty at all times. ## be left empty at all times.
local_orig: bool &log &optional; local_orig: bool &log &optional;
## Indicates the number of bytes missed in content gaps which is ## Indicates the number of bytes missed in content gaps, which is
## representative of packet loss. A value other than zero will ## representative of packet loss. A value other than zero will
## normally cause protocol analysis to fail but some analysis may ## normally cause protocol analysis to fail but some analysis may
## have been completed prior to the packet loss. ## have been completed prior to the packet loss.
missed_bytes: count &log &default=0; missed_bytes: count &log &default=0;
## Records the state history of connections as a string of letters. ## Records the state history of connections as a string of letters.
## For TCP connections the meaning of those letters is: ## The meaning of those letters is:
## ##
## ====== ==================================================== ## ====== ====================================================
## Letter Meaning ## Letter Meaning
@ -83,24 +83,29 @@ export {
## i inconsistent packet (e.g. SYN+RST bits both set) ## i inconsistent packet (e.g. SYN+RST bits both set)
## ====== ==================================================== ## ====== ====================================================
## ##
## If the letter is in upper case it means the event comes from the ## If the event comes from the originator, the letter is in upper-case; if it comes
## originator and lower case then means the responder. ## from the responder, it's in lower-case. Multiple packets of the same type will
## Also, there is compression. We only record one "d" in each direction, ## only be noted once (e.g. we only record one "d" in each direction, regardless of
## for instance. I.e., we just record that data went in that direction. ## how many data packets were seen.)
## This history is not meant to encode how much data that happened to
## be.
history: string &log &optional; history: string &log &optional;
## Number of packets the originator sent. ## Number of packets that the originator sent.
## Only set if :bro:id:`use_conn_size_analyzer` = T ## Only set if :bro:id:`use_conn_size_analyzer` = T
orig_pkts: count &log &optional; orig_pkts: count &log &optional;
## Number IP level bytes the originator sent (as seen on the wire, ## Number of IP level bytes that the originator sent (as seen on the wire,
## taken from IP total_length header field). ## taken from IP total_length header field).
## Only set if :bro:id:`use_conn_size_analyzer` = T ## Only set if :bro:id:`use_conn_size_analyzer` = T
orig_ip_bytes: count &log &optional; orig_ip_bytes: count &log &optional;
## Number of packets the responder sent. See ``orig_pkts``. ## Number of packets that the responder sent.
## Only set if :bro:id:`use_conn_size_analyzer` = T
resp_pkts: count &log &optional; resp_pkts: count &log &optional;
## Number IP level bytes the responder sent. See ``orig_pkts``. ## Number of IP level bytes that the responder sent (as seen on the wire,
## taken from IP total_length header field).
## Only set if :bro:id:`use_conn_size_analyzer` = T
resp_ip_bytes: count &log &optional; resp_ip_bytes: count &log &optional;
## If this connection was over a tunnel, indicate the
## *uid* values for any encapsulating parent connections
## used over the lifetime of this inner connection.
tunnel_parents: set[string] &log;
}; };
## Event that can be handled to access the :bro:type:`Conn::Info` ## Event that can be handled to access the :bro:type:`Conn::Info`
@ -190,13 +195,15 @@ function set_conn(c: connection, eoc: bool)
c$conn$ts=c$start_time; c$conn$ts=c$start_time;
c$conn$uid=c$uid; c$conn$uid=c$uid;
c$conn$id=c$id; c$conn$id=c$id;
if ( c?$tunnel && |c$tunnel| > 0 )
add c$conn$tunnel_parents[c$tunnel[|c$tunnel|-1]$uid];
c$conn$proto=get_port_transport_proto(c$id$resp_p); c$conn$proto=get_port_transport_proto(c$id$resp_p);
if( |Site::local_nets| > 0 ) if( |Site::local_nets| > 0 )
c$conn$local_orig=Site::is_local_addr(c$id$orig_h); c$conn$local_orig=Site::is_local_addr(c$id$orig_h);
if ( eoc ) if ( eoc )
{ {
if ( c$duration > 0secs ) if ( c$duration > 0secs )
{ {
c$conn$duration=c$duration; c$conn$duration=c$duration;
c$conn$orig_bytes=c$orig$size; c$conn$orig_bytes=c$orig$size;
@ -212,7 +219,7 @@ function set_conn(c: connection, eoc: bool)
c$conn$resp_ip_bytes = c$resp$num_bytes_ip; c$conn$resp_ip_bytes = c$resp$num_bytes_ip;
} }
local service = determine_service(c); local service = determine_service(c);
if ( service != "" ) if ( service != "" )
c$conn$service=service; c$conn$service=service;
c$conn$conn_state=conn_state(c, get_port_transport_proto(c$id$resp_p)); c$conn$conn_state=conn_state(c, get_port_transport_proto(c$id$resp_p));
@ -224,10 +231,18 @@ function set_conn(c: connection, eoc: bool)
event content_gap(c: connection, is_orig: bool, seq: count, length: count) &priority=5 event content_gap(c: connection, is_orig: bool, seq: count, length: count) &priority=5
{ {
set_conn(c, F); set_conn(c, F);
c$conn$missed_bytes = c$conn$missed_bytes + length; c$conn$missed_bytes = c$conn$missed_bytes + length;
} }
event tunnel_changed(c: connection, e: EncapsulatingConnVector) &priority=5
{
set_conn(c, F);
if ( |e| > 0 )
add c$conn$tunnel_parents[e[|e|-1]$uid];
c$tunnel = e;
}
event connection_state_remove(c: connection) &priority=5 event connection_state_remove(c: connection) &priority=5
{ {
set_conn(c, T); set_conn(c, T);

View file

@ -39,24 +39,22 @@ export {
rcode: count &log &optional; rcode: count &log &optional;
## A descriptive name for the response code value. ## A descriptive name for the response code value.
rcode_name: string &log &optional; rcode_name: string &log &optional;
## Whether the message is a query (F) or response (T).
QR: bool &log &default=F;
## The Authoritative Answer bit for response messages specifies that ## The Authoritative Answer bit for response messages specifies that
## the responding name server is an authority for the domain name ## the responding name server is an authority for the domain name
## in the question section. ## in the question section.
AA: bool &log &default=F; AA: bool &log &default=F;
## The Truncation bit specifies that the message was truncated. ## The Truncation bit specifies that the message was truncated.
TC: bool &log &default=F; TC: bool &log &default=F;
## The Recursion Desired bit indicates to a name server to recursively ## The Recursion Desired bit in a request message indicates that
## purse the query. ## the client wants recursive service for this query.
RD: bool &log &default=F; RD: bool &log &default=F;
## The Recursion Available bit in a response message indicates if ## The Recursion Available bit in a response message indicates that
## the name server supports recursive queries. ## the name server supports recursive queries.
RA: bool &log &default=F; RA: bool &log &default=F;
## A reserved field that is currently supposed to be zero in all ## A reserved field that is currently supposed to be zero in all
## queries and responses. ## queries and responses.
Z: count &log &default=0; Z: count &log &default=0;
## The set of resource descriptions in answer of the query. ## The set of resource descriptions in the query answer.
answers: vector of string &log &optional; answers: vector of string &log &optional;
## The caching intervals of the associated RRs described by the ## The caching intervals of the associated RRs described by the
## ``answers`` field. ## ``answers`` field.
@ -164,11 +162,11 @@ function set_session(c: connection, msg: dns_msg, is_query: bool)
c$dns = c$dns_state$pending[msg$id]; c$dns = c$dns_state$pending[msg$id];
c$dns$rcode = msg$rcode;
c$dns$rcode_name = base_errors[msg$rcode];
if ( ! is_query ) if ( ! is_query )
{ {
c$dns$rcode = msg$rcode;
c$dns$rcode_name = base_errors[msg$rcode];
if ( ! c$dns?$total_answers ) if ( ! c$dns?$total_answers )
c$dns$total_answers = msg$num_answers; c$dns$total_answers = msg$num_answers;

View file

@ -6,6 +6,7 @@
@load ./utils-commands @load ./utils-commands
@load base/utils/paths @load base/utils/paths
@load base/utils/numbers @load base/utils/numbers
@load base/utils/addrs
module FTP; module FTP;
@ -22,12 +23,14 @@ export {
const default_capture_password = F &redef; const default_capture_password = F &redef;
## User IDs that can be considered "anonymous". ## User IDs that can be considered "anonymous".
const guest_ids = { "anonymous", "ftp", "guest" } &redef; const guest_ids = { "anonymous", "ftp", "ftpuser", "guest" } &redef;
type Info: record { type Info: record {
## Time when the command was sent. ## Time when the command was sent.
ts: time &log; ts: time &log;
## Unique ID for the connection.
uid: string &log; uid: string &log;
## The connection's 4-tuple of endpoint addresses/ports.
id: conn_id &log; id: conn_id &log;
## User name for the current FTP session. ## User name for the current FTP session.
user: string &log &default="<unknown>"; user: string &log &default="<unknown>";
@ -160,17 +163,16 @@ function ftp_message(s: Info)
# or it's a deliberately logged command. # or it's a deliberately logged command.
if ( |s$tags| > 0 || (s?$cmdarg && s$cmdarg$cmd in logged_commands) ) if ( |s$tags| > 0 || (s?$cmdarg && s$cmdarg$cmd in logged_commands) )
{ {
if ( s?$password && to_lower(s$user) !in guest_ids ) if ( s?$password &&
! s$capture_password &&
to_lower(s$user) !in guest_ids )
{
s$password = "<hidden>"; s$password = "<hidden>";
}
local arg = s$cmdarg$arg; local arg = s$cmdarg$arg;
if ( s$cmdarg$cmd in file_cmds ) if ( s$cmdarg$cmd in file_cmds )
{ arg = fmt("ftp://%s%s", addr_to_uri(s$id$resp_h), build_path_compressed(s$cwd, arg));
if ( is_v4_addr(s$id$resp_h) )
arg = fmt("ftp://%s%s", s$id$resp_h, build_path_compressed(s$cwd, arg));
else
arg = fmt("ftp://[%s]%s", s$id$resp_h, build_path_compressed(s$cwd, arg));
}
s$ts=s$cmdarg$ts; s$ts=s$cmdarg$ts;
s$command=s$cmdarg$cmd; s$command=s$cmdarg$cmd;
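Both tunables touched above are &redef-able; for instance (the extra guest name is hypothetical):

    # Treat one more account name as anonymous.
    redef FTP::guest_ids += { "anon" };
    # Keep cleartext passwords in the log for non-guest users (off by default).
    redef FTP::default_capture_password = T;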

View file

@ -6,7 +6,8 @@
@load ./utils @load ./utils
# Add the magic number signatures to the core signature set. # Add the magic number signatures to the core signature set.
redef signature_files += "base/protocols/http/file-ident.sig"; @load-sigs ./file-ident.sig
# Ignore the signatures used to match files # Ignore the signatures used to match files
redef Signatures::ignored_ids += /^matchfile-/; redef Signatures::ignored_ids += /^matchfile-/;

View file

@ -22,7 +22,9 @@ export {
type Info: record { type Info: record {
## Timestamp for when the request happened. ## Timestamp for when the request happened.
ts: time &log; ts: time &log;
## Unique ID for the connection.
uid: string &log; uid: string &log;
## The connection's 4-tuple of endpoint addresses/ports.
id: conn_id &log; id: conn_id &log;
## Represents the pipelined depth into the connection of this ## Represents the pipelined depth into the connection of this
## request/response transaction. ## request/response transaction.
@ -112,7 +114,7 @@ event bro_init() &priority=5
# DPD configuration. # DPD configuration.
const ports = { const ports = {
80/tcp, 81/tcp, 631/tcp, 1080/tcp, 3138/tcp, 80/tcp, 81/tcp, 631/tcp, 1080/tcp, 3128/tcp,
8000/tcp, 8080/tcp, 8888/tcp, 8000/tcp, 8080/tcp, 8888/tcp,
}; };
redef dpd_config += { redef dpd_config += {

View file

@ -1,6 +1,7 @@
##! Utilities specific for HTTP processing. ##! Utilities specific for HTTP processing.
@load ./main @load ./main
@load base/utils/addrs
module HTTP; module HTTP;
@ -51,7 +52,7 @@ function extract_keys(data: string, kv_splitter: pattern): string_vec
function build_url(rec: Info): string function build_url(rec: Info): string
{ {
local uri = rec?$uri ? rec$uri : "/<missed_request>"; local uri = rec?$uri ? rec$uri : "/<missed_request>";
local host = rec?$host ? rec$host : fmt("%s", rec$id$resp_h); local host = rec?$host ? rec$host : addr_to_uri(rec$id$resp_h);
if ( rec$id$resp_p != 80/tcp ) if ( rec$id$resp_p != 80/tcp )
host = fmt("%s:%s", host, rec$id$resp_p); host = fmt("%s:%s", host, rec$id$resp_p);
return fmt("%s%s", host, uri); return fmt("%s%s", host, uri);

View file

@ -11,7 +11,9 @@ export {
type Info: record { type Info: record {
## Timestamp when the command was seen. ## Timestamp when the command was seen.
ts: time &log; ts: time &log;
## Unique ID for the connection.
uid: string &log; uid: string &log;
## The connection's 4-tuple of endpoint addresses/ports.
id: conn_id &log; id: conn_id &log;
## Nick name given for the connection. ## Nick name given for the connection.
nick: string &log &optional; nick: string &log &optional;

View file

@ -8,33 +8,51 @@ export {
redef enum Log::ID += { LOG }; redef enum Log::ID += { LOG };
type Info: record { type Info: record {
## Time when the message was first seen.
ts: time &log; ts: time &log;
## Unique ID for the connection.
uid: string &log; uid: string &log;
## The connection's 4-tuple of endpoint addresses/ports.
id: conn_id &log; id: conn_id &log;
## This is a number that indicates the number of messages deep into ## A count to represent the depth of this message transaction in a single
## this connection where this particular message was transferred. ## connection where multiple messages were transferred.
trans_depth: count &log; trans_depth: count &log;
## Contents of the Helo header.
helo: string &log &optional; helo: string &log &optional;
## Contents of the MAIL FROM envelope sender.
mailfrom: string &log &optional; mailfrom: string &log &optional;
## Contents of the Rcpt header.
rcptto: set[string] &log &optional; rcptto: set[string] &log &optional;
## Contents of the Date header.
date: string &log &optional; date: string &log &optional;
## Contents of the From header.
from: string &log &optional; from: string &log &optional;
## Contents of the To header.
to: set[string] &log &optional; to: set[string] &log &optional;
## Contents of the Reply-To header.
reply_to: string &log &optional; reply_to: string &log &optional;
## Contents of the Message-ID header.
msg_id: string &log &optional; msg_id: string &log &optional;
## Contents of the In-Reply-To header.
in_reply_to: string &log &optional; in_reply_to: string &log &optional;
## Contents of the Subject header.
subject: string &log &optional; subject: string &log &optional;
## Contents of the X-Originating-IP header.
x_originating_ip: addr &log &optional; x_originating_ip: addr &log &optional;
## Contents of the first Received header.
first_received: string &log &optional; first_received: string &log &optional;
## Contents of the second Received header.
second_received: string &log &optional; second_received: string &log &optional;
## The last message the server sent to the client. ## The last message that the server sent to the client.
last_reply: string &log &optional; last_reply: string &log &optional;
## The message transmission path, as extracted from the headers.
path: vector of addr &log &optional; path: vector of addr &log &optional;
## Value of the User-Agent header from the client.
user_agent: string &log &optional; user_agent: string &log &optional;
## Indicate if the "Received: from" headers should still be processed. ## Indicates if the "Received: from" headers should still be processed.
process_received_from: bool &default=T; process_received_from: bool &default=T;
## Indicates if client activity has been seen, but not yet logged ## Indicates if client activity has been seen, but not yet logged.
has_client_activity: bool &default=F; has_client_activity: bool &default=F;
}; };

View file

@ -0,0 +1,2 @@
@load ./consts
@load ./main

View file

@ -0,0 +1,40 @@
module SOCKS;
export {
type RequestType: enum {
CONNECTION = 1,
PORT = 2,
UDP_ASSOCIATE = 3,
};
const v5_authentication_methods: table[count] of string = {
[0] = "No Authentication Required",
[1] = "GSSAPI",
[2] = "Username/Password",
[3] = "Challenge-Handshake Authentication Protocol",
[5] = "Challenge-Response Authentication Method",
[6] = "Secure Sockets Layer",
[7] = "NDS Authentication",
[8] = "Multi-Authentication Framework",
[255] = "No Acceptable Methods",
} &default=function(i: count):string { return fmt("unknown-%d", i); };
const v4_status: table[count] of string = {
[0x5a] = "succeeded",
[0x5b] = "general SOCKS server failure",
[0x5c] = "request failed because client is not running identd",
[0x5d] = "request failed because client's identd could not confirm the user ID string in the request",
} &default=function(i: count):string { return fmt("unknown-%d", i); };
const v5_status: table[count] of string = {
[0] = "succeeded",
[1] = "general SOCKS server failure",
[2] = "connection not allowed by ruleset",
[3] = "Network unreachable",
[4] = "Host unreachable",
[5] = "Connection refused",
[6] = "TTL expired",
[7] = "Command not supported",
[8] = "Address type not supported",
} &default=function(i: count):string { return fmt("unknown-%d", i); };
}
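Because each table carries a &default function, lookups degrade gracefully for codes that aren't listed; a quick illustration:

    event bro_init()
        {
        print SOCKS::v5_status[2];  # "connection not allowed by ruleset"
        print SOCKS::v5_status[99]; # "unknown-99", via the &default function
        }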

View file

@ -0,0 +1,92 @@
@load base/frameworks/tunnels
@load ./consts
module SOCKS;
export {
redef enum Log::ID += { LOG };
type Info: record {
## Time when the proxy connection was first detected.
ts: time &log;
## Unique ID for the tunnel; it may correspond to a connection's *uid* field.
uid: string &log;
## The connection's 4-tuple of endpoint addresses/ports.
id: conn_id &log;
## Protocol version of SOCKS.
version: count &log;
## Username for the proxy, if extracted from the network.
user: string &log &optional;
## Server status for the attempt at using the proxy.
status: string &log &optional;
## Client requested SOCKS address. Could be an address, a name or both.
request: SOCKS::Address &log &optional;
## Client requested port.
request_p: port &log &optional;
## Server bound address. Could be an address, a name or both.
bound: SOCKS::Address &log &optional;
## Server bound port.
bound_p: port &log &optional;
};
## Event that can be handled to access the SOCKS
## record as it is sent on to the logging framework.
global log_socks: event(rec: Info);
}
event bro_init() &priority=5
{
Log::create_stream(SOCKS::LOG, [$columns=Info, $ev=log_socks]);
}
redef record connection += {
socks: SOCKS::Info &optional;
};
# Configure DPD
redef capture_filters += { ["socks"] = "tcp port 1080" };
redef dpd_config += { [ANALYZER_SOCKS] = [$ports = set(1080/tcp)] };
redef likely_server_ports += { 1080/tcp };
function set_session(c: connection, version: count)
{
if ( ! c?$socks )
c$socks = [$ts=network_time(), $id=c$id, $uid=c$uid, $version=version];
}
event socks_request(c: connection, version: count, request_type: count,
sa: SOCKS::Address, p: port, user: string) &priority=5
{
set_session(c, version);
c$socks$request = sa;
c$socks$request_p = p;
# Copy this conn_id and zero out orig_p: a SOCKS proxy potentially uses
# many source ports, since a new proxy connection is established for each
# proxied connection, yet we treat them all as a single "tunnel".
local cid = copy(c$id);
cid$orig_p = 0/tcp;
Tunnel::register([$cid=cid, $tunnel_type=Tunnel::SOCKS, $payload_proxy=T]);
}
event socks_reply(c: connection, version: count, reply: count, sa: SOCKS::Address, p: port) &priority=5
{
set_session(c, version);
if ( version == 5 )
c$socks$status = v5_status[reply];
else if ( version == 4 )
c$socks$status = v4_status[reply];
c$socks$bound = sa;
c$socks$bound_p = p;
}
event socks_reply(c: connection, version: count, reply: count, sa: SOCKS::Address, p: port) &priority=-5
{
# This will handle the case where the analyzer failed in some way and was removed. We probably
# don't want to log these connections.
if ( "SOCKS" in c$service )
Log::write(SOCKS::LOG, c$socks);
}
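A sketch of consuming the new stream through its logging event; the handler assumes SOCKS::Address carries an optional *name* field, as the doc comments above indicate:

    event SOCKS::log_socks(rec: SOCKS::Info)
        {
        # Flag proxied requests made by host name rather than raw address.
        if ( rec?$request && rec$request?$name )
            print fmt("SOCKS v%d request for %s", rec$version, rec$request$name);
        }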

View file

@ -26,21 +26,23 @@ export {
type Info: record { type Info: record {
## Time when the SSH connection began. ## Time when the SSH connection began.
ts: time &log; ts: time &log;
## Unique ID for the connection.
uid: string &log; uid: string &log;
## The connection's 4-tuple of endpoint addresses/ports.
id: conn_id &log; id: conn_id &log;
## Indicates if the login was heuristically guessed to be "success" ## Indicates if the login was heuristically guessed to be "success"
## or "failure". ## or "failure".
status: string &log &optional; status: string &log &optional;
## Direction of the connection. If the client was a local host ## Direction of the connection. If the client was a local host
## logging into an external host, this would be OUTBOUD. INBOUND ## logging into an external host, this would be OUTBOUND. INBOUND
## would be set for the opposite situation. ## would be set for the opposite situation.
# TODO: handle local-local and remote-remote better. # TODO: handle local-local and remote-remote better.
direction: Direction &log &optional; direction: Direction &log &optional;
## Software string given by the client. ## Software string from the client.
client: string &log &optional; client: string &log &optional;
## Software string given by the server. ## Software string from the server.
server: string &log &optional; server: string &log &optional;
## Amount of data returned from the server. This is currently ## Amount of data returned from the server. This is currently
## the only measure of the success heuristic and it is logged to ## the only measure of the success heuristic and it is logged to
## assist analysts looking at the logs to make their own determination ## assist analysts looking at the logs to make their own determination
## about the success on a case-by-case basis. ## about the success on a case-by-case basis.

View file

@ -77,8 +77,12 @@ export {
[12] = "srp", [12] = "srp",
[13] = "signature_algorithms", [13] = "signature_algorithms",
[14] = "use_srtp", [14] = "use_srtp",
[15] = "heartbeat",
[35] = "SessionTicket TLS", [35] = "SessionTicket TLS",
[40] = "extended_random",
[13172] = "next_protocol_negotiation", [13172] = "next_protocol_negotiation",
[13175] = "origin_bound_certificates",
[13180] = "encrypted_client_certificates",
[65281] = "renegotiation_info" [65281] = "renegotiation_info"
} &default=function(i: count):string { return fmt("unknown-%d", i); }; } &default=function(i: count):string { return fmt("unknown-%d", i); };

View file

@ -9,13 +9,15 @@ export {
redef enum Log::ID += { LOG }; redef enum Log::ID += { LOG };
type Info: record { type Info: record {
## Time when the SSL connection began. ## Time when the SSL connection was first detected.
ts: time &log; ts: time &log;
uid: string &log; ## Unique ID for the connection.
uid: string &log;
## The connection's 4-tuple of endpoint addresses/ports.
id: conn_id &log; id: conn_id &log;
## SSL/TLS version the server offered. ## SSL/TLS version that the server offered.
version: string &log &optional; version: string &log &optional;
## SSL/TLS cipher suite the server chose. ## SSL/TLS cipher suite that the server chose.
cipher: string &log &optional; cipher: string &log &optional;
## Value of the Server Name Indicator SSL/TLS extension. It ## Value of the Server Name Indicator SSL/TLS extension. It
## indicates the server name that the client was requesting. ## indicates the server name that the client was requesting.
@ -24,6 +26,8 @@ export {
session_id: string &log &optional; session_id: string &log &optional;
## Subject of the X.509 certificate offered by the server. ## Subject of the X.509 certificate offered by the server.
subject: string &log &optional; subject: string &log &optional;
## Subject of the signer of the X.509 certificate offered by the server.
issuer_subject: string &log &optional;
## NotValidBefore field value from the server certificate. ## NotValidBefore field value from the server certificate.
not_valid_before: time &log &optional; not_valid_before: time &log &optional;
## NotValidAfter field value from the server certificate. ## NotValidAfter field value from the server certificate.
@ -146,6 +150,7 @@ event x509_certificate(c: connection, is_orig: bool, cert: X509, chain_idx: coun
# Also save other certificate information about the primary cert. # Also save other certificate information about the primary cert.
c$ssl$subject = cert$subject; c$ssl$subject = cert$subject;
c$ssl$issuer_subject = cert$issuer;
c$ssl$not_valid_before = cert$not_valid_before; c$ssl$not_valid_before = cert$not_valid_before;
c$ssl$not_valid_after = cert$not_valid_after; c$ssl$not_valid_after = cert$not_valid_after;
} }

File diff suppressed because one or more lines are too long

View file

@ -9,9 +9,11 @@ export {
redef enum Log::ID += { LOG }; redef enum Log::ID += { LOG };
type Info: record { type Info: record {
## Timestamp of when the syslog message was seen. ## Timestamp when the syslog message was seen.
ts: time &log; ts: time &log;
## Unique ID for the connection.
uid: string &log; uid: string &log;
## The connection's 4-tuple of endpoint addresses/ports.
id: conn_id &log; id: conn_id &log;
## Protocol over which the message was seen. ## Protocol over which the message was seen.
proto: transport_proto &log; proto: transport_proto &log;

View file

@ -98,3 +98,18 @@ function find_ip_addresses(input: string): string_array
} }
return output; return output;
} }
## Returns the string representation of an IP address suitable for inclusion
## in a URI. For IPv4, this does no special formatting, but for IPv6, the
## address is included in square brackets.
##
## a: the address to make suitable for URI inclusion.
##
## Returns: the string representation of *a* suitable for URI inclusion.
function addr_to_uri(a: addr): string
{
if ( is_v4_addr(a) )
return fmt("%s", a);
else
return fmt("[%s]", a);
}
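For example:

    print addr_to_uri(192.168.1.1);    # 192.168.1.1
    print addr_to_uri([2001:db8::1]);  # [2001:db8::1]
    print fmt("http://%s:8080/", addr_to_uri([2001:db8::1]));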

View file

@ -1,10 +1,11 @@
@load ./addrs
## This function can be used to generate a consistent filename for when ## This function can be used to generate a consistent filename for when
## contents of a file, stream, or connection are being extracted to disk. ## contents of a file, stream, or connection are being extracted to disk.
function generate_extraction_filename(prefix: string, c: connection, suffix: string): string function generate_extraction_filename(prefix: string, c: connection, suffix: string): string
{ {
local conn_info = fmt("%s:%d-%s:%d", local conn_info = fmt("%s:%d-%s:%d", addr_to_uri(c$id$orig_h), c$id$orig_p,
c$id$orig_h, c$id$orig_p, c$id$resp_h, c$id$resp_p); addr_to_uri(c$id$resp_h), c$id$resp_p);
if ( prefix != "" ) if ( prefix != "" )
conn_info = fmt("%s_%s", prefix, conn_info); conn_info = fmt("%s_%s", prefix, conn_info);

View file

@ -8,27 +8,31 @@ export {
## Address space that is considered private and unrouted. ## Address space that is considered private and unrouted.
## By default it has RFC defined non-routable IPv4 address space. ## By default it has RFC-defined non-routable IPv4 and IPv6 address space.
const private_address_space: set[subnet] = { const private_address_space: set[subnet] = {
10.0.0.0/8, 10.0.0.0/8,
192.168.0.0/16, 192.168.0.0/16,
127.0.0.0/8, 172.16.0.0/12,
172.16.0.0/12 100.64.0.0/10, # RFC6598 Carrier Grade NAT
127.0.0.0/8,
[fe80::]/10,
[::1]/128,
} &redef; } &redef;
## Networks that are considered "local". ## Networks that are considered "local".
const local_nets: set[subnet] &redef; const local_nets: set[subnet] &redef;
## This is used for retrieving the subnet when you multiple ## This is used for retrieving the subnet when using multiple entries in
## :bro:id:`Site::local_nets`. A membership query can be done with an ## :bro:id:`Site::local_nets`. It's populated automatically from there.
## :bro:type:`addr` and the table will yield the subnet it was found ## A membership query can be done with an
## :bro:type:`addr` and the table will yield the subnet it was found
## within. ## within.
global local_nets_table: table[subnet] of subnet = {}; global local_nets_table: table[subnet] of subnet = {};
## Networks that are considered "neighbors". ## Networks that are considered "neighbors".
const neighbor_nets: set[subnet] &redef; const neighbor_nets: set[subnet] &redef;
## If local network administrators are known and they have responsibility ## If local network administrators are known and they have responsibility
## for defined address space, then a mapping can be defined here between ## for defined address space, then a mapping can be defined here between
## networks for which they have responsibility and a set of email ## networks for which they have responsibility and a set of email
## addresses. ## addresses.
const local_admins: table[subnet] of set[string] = {} &redef; const local_admins: table[subnet] of set[string] = {} &redef;
@ -40,27 +44,33 @@ export {
## Function that returns true if an address corresponds to one of ## Function that returns true if an address corresponds to one of
## the local networks, false if not. ## the local networks, false if not.
## The function inspects :bro:id:`Site::local_nets`.
global is_local_addr: function(a: addr): bool; global is_local_addr: function(a: addr): bool;
## Function that returns true if an address corresponds to one of ## Function that returns true if an address corresponds to one of
## the neighbor networks, false if not. ## the neighbor networks, false if not.
## The function inspects :bro:id:`Site::neighbor_nets`.
global is_neighbor_addr: function(a: addr): bool; global is_neighbor_addr: function(a: addr): bool;
## Function that returns true if an address corresponds to one of ## Function that returns true if an address corresponds to one of
## the private/unrouted networks, false if not. ## the private/unrouted networks, false if not.
## The function inspects :bro:id:`Site::private_address_space`.
global is_private_addr: function(a: addr): bool; global is_private_addr: function(a: addr): bool;
## Function that returns true if a host name is within a local ## Function that returns true if a host name is within a local
## DNS zone. ## DNS zone.
## The function inspects :bro:id:`Site::local_zones`.
global is_local_name: function(name: string): bool; global is_local_name: function(name: string): bool;
## Function that returns true if a host name is within a neighbor ## Function that returns true if a host name is within a neighbor
## DNS zone. ## DNS zone.
## The function inspects :bro:id:`Site::neighbor_zones`.
global is_neighbor_name: function(name: string): bool; global is_neighbor_name: function(name: string): bool;
## Function that returns a comma-separated list of email addresses ## Function that returns a comma-separated list of email addresses
## that are considered administrators for the IP address provided as ## that are considered administrators for the IP address provided as
## an argument. ## an argument.
## The function inspects :bro:id:`Site::local_admins`.
global get_emails: function(a: addr): string; global get_emails: function(a: addr): string;
} }
@ -73,22 +83,22 @@ function is_local_addr(a: addr): bool
{ {
return a in local_nets; return a in local_nets;
} }
function is_neighbor_addr(a: addr): bool function is_neighbor_addr(a: addr): bool
{ {
return a in neighbor_nets; return a in neighbor_nets;
} }
function is_private_addr(a: addr): bool function is_private_addr(a: addr): bool
{ {
return a in private_address_space; return a in private_address_space;
} }
function is_local_name(name: string): bool function is_local_name(name: string): bool
{ {
return local_dns_suffix_regex in name; return local_dns_suffix_regex in name;
} }
function is_neighbor_name(name: string): bool function is_neighbor_name(name: string): bool
{ {
return local_dns_neighbor_suffix_regex in name; return local_dns_neighbor_suffix_regex in name;
@ -96,7 +106,7 @@ function is_neighbor_name(name: string): bool
# This is a hack for doing a for loop. # This is a hack for doing a for loop.
const one_to_32: vector of count = {1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32}; const one_to_32: vector of count = {1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32};
# TODO: make this work with IPv6 # TODO: make this work with IPv6
function find_all_emails(ip: addr): set[string] function find_all_emails(ip: addr): set[string]
{ {

View file

@ -8,5 +8,6 @@ module Communication;
event bro_init() &priority=-10 event bro_init() &priority=-10
{ {
enable_communication(); enable_communication();
listen(listen_interface, listen_port, listen_ssl); listen(listen_interface, listen_port, listen_ssl, listen_ipv6,
listen_ipv6_zone_id, listen_retry);
} }
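The new arguments are driven by matching Communication constants; a sketch of turning on an IPv6 listener, assuming the &redef-able constants the call above implies:

    redef Communication::listen_ipv6 = T;
    # The zone id only matters when listening on a link-local address:
    # redef Communication::listen_ipv6_zone_id = "eth0";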

View file

@ -25,8 +25,8 @@ event bro_init() &priority=5
# Establish the communication configuration and only request response # Establish the communication configuration and only request response
# messages. # messages.
Communication::nodes["control"] = [$host=host, $p=host_port, Communication::nodes["control"] = [$host=host, $zone_id=zone_id,
$sync=F, $connect=T, $p=host_port, $sync=F, $connect=T,
$class="control", $events=Control::controllee_events]; $class="control", $events=Control::controllee_events];
} }

View file

@ -4,9 +4,10 @@
@load base/frameworks/software @load base/frameworks/software
@load base/protocols/http @load base/protocols/http
@load-sigs ./detect-webapps.sig
module HTTP; module HTTP;
redef signature_files += "protocols/http/detect-webapps.sig";
# Ignore the signatures used to match webapps # Ignore the signatures used to match webapps
redef Signatures::ignored_ids += /^webapp-/; redef Signatures::ignored_ids += /^webapp-/;

View file

@ -1,3 +1,4 @@
##! Watch for various SPAM blocklist URLs in SMTP error messages.
@load base/protocols/smtp @load base/protocols/smtp
@ -5,9 +6,11 @@ module SMTP;
export { export {
redef enum Notice::Type += { redef enum Notice::Type += {
## Indicates that the server sent a reply mentioning an SMTP block list. ## An SMTP server sent a reply mentioning an SMTP block list.
Blocklist_Error_Message, Blocklist_Error_Message,
## Indicates the client's address is seen in the block list error message. ## The originator's address is seen in the block list error message.
## This is useful for detecting local hosts that send SPAM, with a high
## true-positive rate.
Blocklist_Blocked_Host, Blocklist_Blocked_Host,
}; };
@ -52,7 +55,8 @@ event smtp_reply(c: connection, is_orig: bool, code: count, cmd: string,
message = fmt("%s is on an SMTP block list", c$id$orig_h); message = fmt("%s is on an SMTP block list", c$id$orig_h);
} }
NOTICE([$note=note, $conn=c, $msg=message, $sub=msg]); NOTICE([$note=note, $conn=c, $msg=message, $sub=msg,
$identifier=cat(c$id$orig_h)]);
} }
} }
} }

View file

@ -0,0 +1,36 @@
##! Load this script to enable global log output to an ElasticSearch database.
module LogElasticSearch;
export {
## An elasticsearch specific rotation interval.
const rotation_interval = 3hr &redef;
## Optionally ignore any :bro:type:`Log::ID` from being sent to
## ElasticSearch with this script.
const excluded_log_ids: set[Log::ID] &redef;
## If you want to explicitly only send certain :bro:type:`Log::ID`
## streams, add them to this set. If the set remains empty, all will
## be sent. The :bro:id:`LogElasticSearch::excluded_log_ids` option will remain in
## effect as well.
const send_logs: set[Log::ID] &redef;
}
event bro_init() &priority=-5
{
if ( server_host == "" )
return;
for ( stream_id in Log::active_streams )
{
if ( stream_id in excluded_log_ids ||
(|send_logs| > 0 && stream_id !in send_logs) )
next;
local filter: Log::Filter = [$name = "default-es",
$writer = Log::WRITER_ELASTICSEARCH,
$interv = LogElasticSearch::rotation_interval];
Log::add_filter(stream_id, filter);
}
}
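Typical use, sketched with example streams: load the script, then narrow what gets shipped.

    @load tuning/logs-to-elasticsearch

    # Ship only the HTTP and connection streams...
    redef LogElasticSearch::send_logs += { HTTP::LOG, Conn::LOG };
    # ...and keep DNS out even if send_logs is widened later.
    redef LogElasticSearch::excluded_log_ids += { DNS::LOG };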

View file

@ -25,7 +25,7 @@ redef Software::vulnerable_versions += {
@load frameworks/software/version-changes @load frameworks/software/version-changes
# This adds signatures to detect cleartext forward and reverse windows shells. # This adds signatures to detect cleartext forward and reverse windows shells.
redef signature_files += "frameworks/signatures/detect-windows-shells.sig"; @load-sigs frameworks/signatures/detect-windows-shells
# Uncomment the following line to begin receiving (by default hourly) emails # Uncomment the following line to begin receiving (by default hourly) emails
# containing all of your notices. # containing all of your notices.

View file

@ -60,4 +60,5 @@
@load tuning/defaults/__load__.bro @load tuning/defaults/__load__.bro
@load tuning/defaults/packet-fragments.bro @load tuning/defaults/packet-fragments.bro
@load tuning/defaults/warnings.bro @load tuning/defaults/warnings.bro
@load tuning/logs-to-elasticsearch.bro
@load tuning/track-all-assets.bro @load tuning/track-all-assets.bro

24
src/AYIYA.cc Normal file
View file

@ -0,0 +1,24 @@
#include "AYIYA.h"
AYIYA_Analyzer::AYIYA_Analyzer(Connection* conn)
: Analyzer(AnalyzerTag::AYIYA, conn)
{
interp = new binpac::AYIYA::AYIYA_Conn(this);
}
AYIYA_Analyzer::~AYIYA_Analyzer()
{
delete interp;
}
void AYIYA_Analyzer::Done()
{
Analyzer::Done();
Event(udp_session_done);
}
void AYIYA_Analyzer::DeliverPacket(int len, const u_char* data, bool orig, int seq, const IP_Hdr* ip, int caplen)
{
Analyzer::DeliverPacket(len, data, orig, seq, ip, caplen);
interp->NewData(orig, data, data + len);
}

29
src/AYIYA.h Normal file
View file

@ -0,0 +1,29 @@
#ifndef AYIYA_h
#define AYIYA_h
#include "ayiya_pac.h"
class AYIYA_Analyzer : public Analyzer {
public:
AYIYA_Analyzer(Connection* conn);
virtual ~AYIYA_Analyzer();
virtual void Done();
virtual void DeliverPacket(int len, const u_char* data, bool orig,
int seq, const IP_Hdr* ip, int caplen);
static Analyzer* InstantiateAnalyzer(Connection* conn)
{ return new AYIYA_Analyzer(conn); }
static bool Available()
{ return BifConst::Tunnel::enable_ayiya &&
BifConst::Tunnel::max_depth > 0; }
protected:
friend class AnalyzerTimer;
void ExpireTimer(double t);
binpac::AYIYA::AYIYA_Conn* interp;
};
#endif

View file

@ -4,6 +4,7 @@
#include "PIA.h" #include "PIA.h"
#include "Event.h" #include "Event.h"
#include "AYIYA.h"
#include "BackDoor.h" #include "BackDoor.h"
#include "BitTorrent.h" #include "BitTorrent.h"
#include "BitTorrentTracker.h" #include "BitTorrentTracker.h"
@ -33,9 +34,11 @@
#include "NFS.h" #include "NFS.h"
#include "Portmap.h" #include "Portmap.h"
#include "POP3.h" #include "POP3.h"
#include "SOCKS.h"
#include "SSH.h" #include "SSH.h"
#include "SSL-binpac.h" #include "SSL.h"
#include "Syslog-binpac.h" #include "Syslog-binpac.h"
#include "Teredo.h"
#include "ConnSizeAnalyzer.h" #include "ConnSizeAnalyzer.h"
// Keep same order here as in AnalyzerTag definition! // Keep same order here as in AnalyzerTag definition!
@ -49,18 +52,6 @@ const Analyzer::Config Analyzer::analyzer_configs[] = {
{ AnalyzerTag::ICMP, "ICMP", ICMP_Analyzer::InstantiateAnalyzer, { AnalyzerTag::ICMP, "ICMP", ICMP_Analyzer::InstantiateAnalyzer,
ICMP_Analyzer::Available, 0, false }, ICMP_Analyzer::Available, 0, false },
{ AnalyzerTag::ICMP_TimeExceeded, "ICMP_TIMEEXCEEDED",
ICMP_TimeExceeded_Analyzer::InstantiateAnalyzer,
ICMP_TimeExceeded_Analyzer::Available, 0, false },
{ AnalyzerTag::ICMP_Unreachable, "ICMP_UNREACHABLE",
ICMP_Unreachable_Analyzer::InstantiateAnalyzer,
ICMP_Unreachable_Analyzer::Available, 0, false },
{ AnalyzerTag::ICMP_Echo, "ICMP_ECHO",
ICMP_Echo_Analyzer::InstantiateAnalyzer,
ICMP_Echo_Analyzer::Available, 0, false },
{ AnalyzerTag::ICMP_Redir, "ICMP_REDIR",
ICMP_Redir_Analyzer::InstantiateAnalyzer,
ICMP_Redir_Analyzer::Available, 0, false },
{ AnalyzerTag::TCP, "TCP", TCP_Analyzer::InstantiateAnalyzer, { AnalyzerTag::TCP, "TCP", TCP_Analyzer::InstantiateAnalyzer,
TCP_Analyzer::Available, 0, false }, TCP_Analyzer::Available, 0, false },
@ -133,12 +124,22 @@ const Analyzer::Config Analyzer::analyzer_configs[] = {
HTTP_Analyzer_binpac::InstantiateAnalyzer, HTTP_Analyzer_binpac::InstantiateAnalyzer,
HTTP_Analyzer_binpac::Available, 0, false }, HTTP_Analyzer_binpac::Available, 0, false },
{ AnalyzerTag::SSL, "SSL", { AnalyzerTag::SSL, "SSL",
SSL_Analyzer_binpac::InstantiateAnalyzer, SSL_Analyzer::InstantiateAnalyzer,
SSL_Analyzer_binpac::Available, 0, false }, SSL_Analyzer::Available, 0, false },
{ AnalyzerTag::SYSLOG_BINPAC, "SYSLOG_BINPAC", { AnalyzerTag::SYSLOG_BINPAC, "SYSLOG_BINPAC",
Syslog_Analyzer_binpac::InstantiateAnalyzer, Syslog_Analyzer_binpac::InstantiateAnalyzer,
Syslog_Analyzer_binpac::Available, 0, false }, Syslog_Analyzer_binpac::Available, 0, false },
{ AnalyzerTag::AYIYA, "AYIYA",
AYIYA_Analyzer::InstantiateAnalyzer,
AYIYA_Analyzer::Available, 0, false },
{ AnalyzerTag::SOCKS, "SOCKS",
SOCKS_Analyzer::InstantiateAnalyzer,
SOCKS_Analyzer::Available, 0, false },
{ AnalyzerTag::Teredo, "TEREDO",
Teredo_Analyzer::InstantiateAnalyzer,
Teredo_Analyzer::Available, 0, false },
{ AnalyzerTag::File, "FILE", File_Analyzer::InstantiateAnalyzer, { AnalyzerTag::File, "FILE", File_Analyzer::InstantiateAnalyzer,
File_Analyzer::Available, 0, false }, File_Analyzer::Available, 0, false },
{ AnalyzerTag::Backdoor, "BACKDOOR", { AnalyzerTag::Backdoor, "BACKDOOR",

View file

@ -215,6 +215,11 @@ public:
// analyzer, even if the method is called multiple times. // analyzer, even if the method is called multiple times.
virtual void ProtocolConfirmation(); virtual void ProtocolConfirmation();
// Return whether the analyzer previously called ProtocolConfirmation()
// at least once before.
bool ProtocolConfirmed() const
{ return protocol_confirmed; }
// Report that we found a significant protocol violation which might // Report that we found a significant protocol violation which might
// indicate that the analyzed data is in fact not the expected // indicate that the analyzed data is in fact not the expected
// protocol. The protocol_violation event is raised once per call to // protocol. The protocol_violation event is raised once per call to
@ -338,6 +343,10 @@ private:
for ( analyzer_list::iterator var = the_kids.begin(); \ for ( analyzer_list::iterator var = the_kids.begin(); \
var != the_kids.end(); var++ ) var != the_kids.end(); var++ )
#define LOOP_OVER_GIVEN_CONST_CHILDREN(var, the_kids) \
for ( analyzer_list::const_iterator var = the_kids.begin(); \
var != the_kids.end(); var++ )
class SupportAnalyzer : public Analyzer { class SupportAnalyzer : public Analyzer {
public: public:
SupportAnalyzer(AnalyzerTag::Tag tag, Connection* conn, bool arg_orig) SupportAnalyzer(AnalyzerTag::Tag tag, Connection* conn, bool arg_orig)

View file

@ -20,9 +20,7 @@ namespace AnalyzerTag {
PIA_TCP, PIA_UDP, PIA_TCP, PIA_UDP,
// Transport-layer analyzers. // Transport-layer analyzers.
ICMP, ICMP, TCP, UDP,
ICMP_TimeExceeded, ICMP_Unreachable, ICMP_Echo, ICMP_Redir,
TCP, UDP,
// Application-layer analyzers (hand-written). // Application-layer analyzers (hand-written).
BitTorrent, BitTorrentTracker, BitTorrent, BitTorrentTracker,
@ -35,11 +33,15 @@ namespace AnalyzerTag {
DHCP_BINPAC, DNS_TCP_BINPAC, DNS_UDP_BINPAC, DHCP_BINPAC, DNS_TCP_BINPAC, DNS_UDP_BINPAC,
HTTP_BINPAC, SSL, SYSLOG_BINPAC, HTTP_BINPAC, SSL, SYSLOG_BINPAC,
// Decapsulation analyzers.
AYIYA,
SOCKS,
Teredo,
// Other // Other
File, Backdoor, InterConn, SteppingStone, TCPStats, File, Backdoor, InterConn, SteppingStone, TCPStats,
ConnSize, ConnSize,
// Support-analyzers // Support-analyzers
Contents, ContentLine, NVT, Zip, Contents_DNS, Contents_NCP, Contents, ContentLine, NVT, Zip, Contents_DNS, Contents_NCP,
Contents_NetbiosSSN, Contents_Rlogin, Contents_Rsh, Contents_NetbiosSSN, Contents_Rlogin, Contents_Rsh,

View file

@ -5,7 +5,6 @@
#include "util.h" #include "util.h"
#include "net_util.h" #include "net_util.h"
#include "md5.h"
#include "Anon.h" #include "Anon.h"
#include "Val.h" #include "Val.h"
#include "NetVar.h" #include "NetVar.h"
@ -154,7 +153,7 @@ void AnonymizeIPAddr_A50::init()
int AnonymizeIPAddr_A50::PreservePrefix(ipaddr32_t input, int num_bits) int AnonymizeIPAddr_A50::PreservePrefix(ipaddr32_t input, int num_bits)
{ {
DEBUG_MSG("%s/%d\n", DEBUG_MSG("%s/%d\n",
IPAddr(IPAddr::IPv4, &input, IPAddr::Network).AsString().c_str(), IPAddr(IPv4, &input, IPAddr::Network).AsString().c_str(),
num_bits); num_bits);
if ( ! before_anonymization ) if ( ! before_anonymization )

View file

@ -5,7 +5,7 @@
#include "Attr.h" #include "Attr.h"
#include "Expr.h" #include "Expr.h"
#include "Serializer.h" #include "Serializer.h"
#include "LogMgr.h" #include "threading/SerialTypes.h"
const char* attr_name(attr_tag t) const char* attr_name(attr_tag t)
{ {
@ -15,9 +15,10 @@ const char* attr_name(attr_tag t)
"&add_func", "&delete_func", "&expire_func", "&add_func", "&delete_func", "&expire_func",
"&read_expire", "&write_expire", "&create_expire", "&read_expire", "&write_expire", "&create_expire",
"&persistent", "&synchronized", "&postprocessor", "&persistent", "&synchronized", "&postprocessor",
"&encrypt", "&match", "&disable_print_hook", "&encrypt", "&match",
"&raw_output", "&mergeable", "&priority", "&raw_output", "&mergeable", "&priority",
"&group", "&log", "&error_handler", "(&tracked)", "&group", "&log", "&error_handler", "&type_column",
"(&tracked)",
}; };
return attr_names[int(t)]; return attr_names[int(t)];
@ -384,11 +385,6 @@ void Attributes::CheckAttr(Attr* a)
// FIXME: Check here for global ID? // FIXME: Check here for global ID?
break; break;
case ATTR_DISABLE_PRINT_HOOK:
if ( type->Tag() != TYPE_FILE )
Error("&disable_print_hook only applicable to files");
break;
case ATTR_RAW_OUTPUT: case ATTR_RAW_OUTPUT:
if ( type->Tag() != TYPE_FILE ) if ( type->Tag() != TYPE_FILE )
Error("&raw_output only applicable to files"); Error("&raw_output only applicable to files");
@ -416,10 +412,29 @@ void Attributes::CheckAttr(Attr* a)
break; break;
case ATTR_LOG: case ATTR_LOG:
if ( ! LogVal::IsCompatibleType(type) ) if ( ! threading::Value::IsCompatibleType(type) )
Error("&log applied to a type that cannot be logged"); Error("&log applied to a type that cannot be logged");
break; break;
case ATTR_TYPE_COLUMN:
{
if ( type->Tag() != TYPE_PORT )
{
Error("type_column tag only applicable to ports");
break;
}
BroType* atype = a->AttrExpr()->Type();
if ( atype->Tag() != TYPE_STRING ) {
Error("type column needs to have a string argument");
break;
}
break;
}
default: default:
BadTag("Attributes::CheckAttr", attr_name(a->Tag())); BadTag("Attributes::CheckAttr", attr_name(a->Tag()));
} }
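The new attribute serves the input framework, where a port's number and its transport protocol arrive in separate columns of an input source; a sketch of the intended script-side use (record and column names are hypothetical):

    type Service: record {
        # Read the port number from this field's own column and its
        # protocol (tcp/udp/...) from the separate column named "proto".
        p: port &type_column="proto";
    };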

View file

@ -28,13 +28,13 @@ typedef enum {
ATTR_POSTPROCESSOR, ATTR_POSTPROCESSOR,
ATTR_ENCRYPT, ATTR_ENCRYPT,
ATTR_MATCH, ATTR_MATCH,
ATTR_DISABLE_PRINT_HOOK,
ATTR_RAW_OUTPUT, ATTR_RAW_OUTPUT,
ATTR_MERGEABLE, ATTR_MERGEABLE,
ATTR_PRIORITY, ATTR_PRIORITY,
ATTR_GROUP, ATTR_GROUP,
ATTR_LOG, ATTR_LOG,
ATTR_ERROR_HANDLER, ATTR_ERROR_HANDLER,
ATTR_TYPE_COLUMN, // for input framework
ATTR_TRACKED, // hidden attribute, tracked by NotifierRegistry ATTR_TRACKED, // hidden attribute, tracked by NotifierRegistry
#define NUM_ATTRS (int(ATTR_TRACKED) + 1) #define NUM_ATTRS (int(ATTR_TRACKED) + 1)
} attr_tag; } attr_tag;

View file

@ -66,45 +66,50 @@ void BitTorrent_Analyzer::DeliverStream(int len, const u_char* data, bool orig)
void BitTorrent_Analyzer::Undelivered(int seq, int len, bool orig) void BitTorrent_Analyzer::Undelivered(int seq, int len, bool orig)
{ {
uint64 entry_offset = orig ?
*interp->upflow()->next_message_offset() :
*interp->downflow()->next_message_offset();
uint64& this_stream_len = orig ? stream_len_orig : stream_len_resp;
bool& this_stop = orig ? stop_orig : stop_resp;
TCP_ApplicationAnalyzer::Undelivered(seq, len, orig); TCP_ApplicationAnalyzer::Undelivered(seq, len, orig);
this_stream_len += len; // TODO: Code commented out for now. Shoving data that
// is definitely wrong into the parser seems like a really bad idea.
// The way it's currently tracking the next message offset isn't
// compatible with new 64bit int support in binpac either.
if ( entry_offset < this_stream_len ) //uint64 entry_offset = orig ?
{ // entry point is somewhere in the gap // *interp->upflow()->next_message_offset() :
DeliverWeird("Stopping BitTorrent analysis: cannot recover from content gap", orig); // *interp->downflow()->next_message_offset();
this_stop = true; //uint64& this_stream_len = orig ? stream_len_orig : stream_len_resp;
if ( stop_orig && stop_resp ) //bool& this_stop = orig ? stop_orig : stop_resp;
ProtocolViolation("BitTorrent: content gap and/or protocol violation"); //
} //this_stream_len += len;
else //
{ // fill the gap //if ( entry_offset < this_stream_len )
try // { // entry point is somewhere in the gap
{ // DeliverWeird("Stopping BitTorrent analysis: cannot recover from content gap", orig);
u_char gap[len]; // this_stop = true;
memset(gap, 0, len); // if ( stop_orig && stop_resp )
interp->NewData(orig, gap, gap + len); // ProtocolViolation("BitTorrent: content gap and/or protocol violation");
} // }
catch ( binpac::Exception const &e ) //else
{ // { // fill the gap
DeliverWeird("Stopping BitTorrent analysis: filling content gap failed", orig); // try
this_stop = true; // {
if ( stop_orig && stop_resp ) // u_char gap[len];
ProtocolViolation("BitTorrent: content gap and/or protocol violation"); // memset(gap, 0, len);
} // interp->NewData(orig, gap, gap + len);
} // }
// catch ( binpac::Exception const &e )
// {
// DeliverWeird("Stopping BitTorrent analysis: filling content gap failed", orig);
// this_stop = true;
// if ( stop_orig && stop_resp )
// ProtocolViolation("BitTorrent: content gap and/or protocol violation");
// }
// }
} }
void BitTorrent_Analyzer::EndpointEOF(TCP_Reassembler* endp) void BitTorrent_Analyzer::EndpointEOF(bool is_orig)
{ {
TCP_ApplicationAnalyzer::EndpointEOF(endp); TCP_ApplicationAnalyzer::EndpointEOF(is_orig);
interp->FlowEOF(endp->IsOrig()); interp->FlowEOF(is_orig);
} }
void BitTorrent_Analyzer::DeliverWeird(const char* msg, bool orig) void BitTorrent_Analyzer::DeliverWeird(const char* msg, bool orig)

View file

@@ -15,7 +15,7 @@ public:
 	virtual void Done();
 	virtual void DeliverStream(int len, const u_char* data, bool orig);
 	virtual void Undelivered(int seq, int len, bool orig);
-	virtual void EndpointEOF(TCP_Reassembler* endp);
+	virtual void EndpointEOF(bool is_orig);

 	static Analyzer* InstantiateAnalyzer(Connection* conn)
 		{ return new BitTorrent_Analyzer(conn); }

View file

@@ -215,9 +215,9 @@ void BitTorrentTracker_Analyzer::Undelivered(int seq, int len, bool orig)
 	stop_resp = true;
 	}

-void BitTorrentTracker_Analyzer::EndpointEOF(TCP_Reassembler* endp)
+void BitTorrentTracker_Analyzer::EndpointEOF(bool is_orig)
 	{
-	TCP_ApplicationAnalyzer::EndpointEOF(endp);
+	TCP_ApplicationAnalyzer::EndpointEOF(is_orig);
 	}

 void BitTorrentTracker_Analyzer::InitBencParser(void)
View file

@ -48,7 +48,7 @@ public:
virtual void Done(); virtual void Done();
virtual void DeliverStream(int len, const u_char* data, bool orig); virtual void DeliverStream(int len, const u_char* data, bool orig);
virtual void Undelivered(int seq, int len, bool orig); virtual void Undelivered(int seq, int len, bool orig);
virtual void EndpointEOF(TCP_Reassembler* endp); virtual void EndpointEOF(bool is_orig);
static Analyzer* InstantiateAnalyzer(Connection* conn) static Analyzer* InstantiateAnalyzer(Connection* conn)
{ return new BitTorrentTracker_Analyzer(conn); } { return new BitTorrentTracker_Analyzer(conn); }

View file

@@ -142,6 +142,7 @@ endmacro(GET_BIF_OUTPUT_FILES)
 set(BIF_SRCS
     bro.bif
     logging.bif
+    input.bif
     event.bif
     const.bif
     types.bif
@@ -186,6 +187,9 @@ endmacro(BINPAC_TARGET)
 binpac_target(binpac-lib.pac)
 binpac_target(binpac_bro-lib.pac)

+binpac_target(ayiya.pac
+    ayiya-protocol.pac ayiya-analyzer.pac)
 binpac_target(bittorrent.pac
     bittorrent-protocol.pac bittorrent-analyzer.pac)
 binpac_target(dce_rpc.pac
@@ -205,6 +209,8 @@ binpac_target(netflow.pac
     netflow-protocol.pac netflow-analyzer.pac)
 binpac_target(smb.pac
     smb-protocol.pac smb-pipe.pac smb-mailslot.pac)
+binpac_target(socks.pac
+    socks-protocol.pac socks-analyzer.pac)
 binpac_target(ssl.pac
     ssl-defs.pac ssl-protocol.pac ssl-analyzer.pac)
 binpac_target(syslog.pac
@@ -213,6 +219,8 @@ binpac_target(syslog.pac
 ########################################################################
 ## bro target

+find_package (Threads)
+
 # This macro stores associated headers for any C/C++ source files given
 # as arguments (past _var) as a list in the CMake variable named "_var".
 macro(COLLECT_HEADERS _var)
@@ -245,7 +253,6 @@ add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/DebugCmdConstants.h
                    WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
 )

-set(dns_SRCS nb_dns.c)
 set_source_files_properties(nb_dns.c PROPERTIES COMPILE_FLAGS
                             -fno-strict-aliasing)

@@ -275,6 +282,7 @@ set(bro_SRCS
     Anon.cc
     ARP.cc
     Attr.cc
+    AYIYA.cc
     BackDoor.cc
     Base64.cc
     BitTorrent.cc
@@ -330,14 +338,11 @@ set(bro_SRCS
     IntSet.cc
     InterConn.cc
     IOSource.cc
+    IP.cc
     IPAddr.cc
     IRC.cc
     List.cc
     Reporter.cc
-    LogMgr.cc
-    LogWriter.cc
-    LogWriterAscii.cc
-    LogWriterNone.cc
     Login.cc
     MIME.cc
     NCP.cc
@@ -376,8 +381,9 @@ set(bro_SRCS
     SmithWaterman.cc
     SMB.cc
     SMTP.cc
+    SOCKS.cc
     SSH.cc
-    SSL-binpac.cc
+    SSL.cc
     Scope.cc
     SerializationFormat.cc
     SerialObj.cc
@@ -392,9 +398,11 @@ set(bro_SRCS
     TCP_Endpoint.cc
     TCP_Reassembler.cc
     Telnet.cc
+    Teredo.cc
     Timer.cc
     Traverse.cc
     Trigger.cc
+    TunnelEncapsulation.cc
     Type.cc
     UDP.cc
     Val.cc
@@ -402,15 +410,36 @@ set(bro_SRCS
     XDR.cc
     ZIP.cc
     bsd-getopt-long.c
+    bro_inet_ntop.c
     cq.c
-    md5.c
     patricia.c
     setsignal.c
     PacketDumper.cc
     strsep.c
     modp_numtoa.c
-    ${dns_SRCS}
-    ${openssl_SRCS}
+
+    threading/BasicThread.cc
+    threading/Manager.cc
+    threading/MsgThread.cc
+    threading/SerialTypes.cc
+
+    logging/Manager.cc
+    logging/WriterBackend.cc
+    logging/WriterFrontend.cc
+    logging/writers/Ascii.cc
+    logging/writers/DataSeries.cc
+    logging/writers/ElasticSearch.cc
+    logging/writers/None.cc
+
+    input/Manager.cc
+    input/ReaderBackend.cc
+    input/ReaderFrontend.cc
+    input/readers/Ascii.cc
+    input/readers/Raw.cc
+    input/readers/Benchmark.cc
+
+    nb_dns.c
+    digest.h
 )

 collect_headers(bro_HEADERS ${bro_SRCS})
@@ -421,7 +450,7 @@ add_definitions(-DBRO_BUILD_PATH="${CMAKE_CURRENT_BINARY_DIR}")

 add_executable(bro ${bro_SRCS} ${bro_HEADERS})

-target_link_libraries(bro ${brodeps})
+target_link_libraries(bro ${brodeps} ${CMAKE_THREAD_LIBS_INIT})

 install(TARGETS bro DESTINATION bin)
 install(FILES ${INSTALL_BIF_OUTPUTS} DESTINATION ${BRO_SCRIPT_INSTALL_PATH}/base)

View file

@@ -76,7 +76,7 @@ void ChunkedIO::DumpDebugData(const char* basefnname, bool want_reads)
 		ChunkedIOFd io(fd, "dump-file");
 		io.Write(*i);
 		io.Flush();
-		close(fd);
+		safe_close(fd);
 		}

 	l->clear();
@@ -127,7 +127,7 @@ ChunkedIOFd::~ChunkedIOFd()
 	delete [] read_buffer;
 	delete [] write_buffer;
-	close(fd);
+	safe_close(fd);

 	if ( partial )
 		{
@@ -686,7 +686,7 @@ ChunkedIOSSL::~ChunkedIOSSL()
 		ssl = 0;
 		}

-	close(socket);
+	safe_close(socket);
 	}
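
This commit replaces plain close() with safe_close() throughout. The real helper lives elsewhere in the Bro tree; assuming it behaves like a conventional checked close, a minimal sketch looks like this (the exact error handling shown is an assumption, not Bro's implementation):

    #include <cerrno>
    #include <cstdio>
    #include <cstring>
    #include <unistd.h>

    // Hypothetical stand-in for Bro's safe_close(): silently ignoring
    // close() failures can hide descriptor leaks and lost writes, so
    // report anything other than an interrupted call.
    void safe_close(int fd)
    	{
    	if ( close(fd) < 0 && errno != EINTR )
    		fprintf(stderr, "close(%d) failed: %s\n", fd, strerror(errno));
    	}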

View file

@@ -709,7 +709,7 @@ const char* CompositeHash::RecoverOneVal(const HashKey* k, const char* kp0,
 		const uint32* const kp = AlignType<uint32>(kp0);
 		kp1 = reinterpret_cast<const char*>(kp+4);

-		IPAddr addr(IPAddr::IPv6, kp, IPAddr::Network);
+		IPAddr addr(IPv6, kp, IPAddr::Network);

 		switch ( tag ) {
 		case TYPE_ADDR:

View file

@@ -13,6 +13,7 @@
 #include "Timer.h"
 #include "PIA.h"
 #include "binpac.h"
+#include "TunnelEncapsulation.h"

 void ConnectionTimer::Init(Connection* arg_conn, timer_func arg_timer,
 				int arg_do_expire)
@@ -111,7 +112,8 @@ unsigned int Connection::external_connections = 0;

 IMPLEMENT_SERIAL(Connection, SER_CONNECTION);

-Connection::Connection(NetSessions* s, HashKey* k, double t, const ConnID* id)
+Connection::Connection(NetSessions* s, HashKey* k, double t, const ConnID* id,
+			uint32 flow, const EncapsulationStack* arg_encap)
 	{
 	sessions = s;
 	key = k;
@@ -122,6 +124,10 @@ Connection::Connection(NetSessions* s, HashKey* k, double t, const ConnID* id,
 	orig_port = id->src_port;
 	resp_port = id->dst_port;
 	proto = TRANSPORT_UNKNOWN;
+	orig_flow_label = flow;
+	resp_flow_label = 0;
+	saw_first_orig_packet = 1;
+	saw_first_resp_packet = 0;

 	conn_val = 0;
 	login_conn = 0;
@@ -155,6 +161,11 @@ Connection::Connection(NetSessions* s, HashKey* k, double t, const ConnID* id,
 	uid = 0; // Will set later.

+	if ( arg_encap )
+		encapsulation = new EncapsulationStack(*arg_encap);
+	else
+		encapsulation = 0;
+
 	if ( conn_timer_mgr )
 		{
 		++external_connections;
@@ -182,12 +193,40 @@ Connection::~Connection()
 	delete key;
 	delete root_analyzer;
 	delete conn_timer_mgr;
+	delete encapsulation;

 	--current_connections;
 	if ( conn_timer_mgr )
 		--external_connections;
 	}

+void Connection::CheckEncapsulation(const EncapsulationStack* arg_encap)
+	{
+	if ( encapsulation && arg_encap )
+		{
+		if ( *encapsulation != *arg_encap )
+			{
+			Event(tunnel_changed, 0, arg_encap->GetVectorVal());
+			delete encapsulation;
+			encapsulation = new EncapsulationStack(*arg_encap);
+			}
+		}
+
+	else if ( encapsulation )
+		{
+		EncapsulationStack empty;
+		Event(tunnel_changed, 0, empty.GetVectorVal());
+		delete encapsulation;
+		encapsulation = 0;
+		}
+
+	else if ( arg_encap )
+		{
+		Event(tunnel_changed, 0, arg_encap->GetVectorVal());
+		encapsulation = new EncapsulationStack(*arg_encap);
+		}
+	}
+
 void Connection::Done()
 	{
 	finished = 1;
@@ -323,10 +362,12 @@ RecordVal* Connection::BuildConnVal()
 		RecordVal *orig_endp = new RecordVal(endpoint);
 		orig_endp->Assign(0, new Val(0, TYPE_COUNT));
 		orig_endp->Assign(1, new Val(0, TYPE_COUNT));
+		orig_endp->Assign(4, new Val(orig_flow_label, TYPE_COUNT));

 		RecordVal *resp_endp = new RecordVal(endpoint);
 		resp_endp->Assign(0, new Val(0, TYPE_COUNT));
 		resp_endp->Assign(1, new Val(0, TYPE_COUNT));
+		resp_endp->Assign(4, new Val(resp_flow_label, TYPE_COUNT));

 		conn_val->Assign(0, id_val);
 		conn_val->Assign(1, orig_endp);
@@ -342,6 +383,9 @@ RecordVal* Connection::BuildConnVal()
 		char tmp[20];
 		conn_val->Assign(9, new StringVal(uitoa_n(uid, tmp, sizeof(tmp), 62)));
+
+		if ( encapsulation && encapsulation->Depth() > 0 )
+			conn_val->Assign(10, encapsulation->GetVectorVal());
 		}

 	if ( root_analyzer )
@@ -675,6 +719,14 @@ void Connection::FlipRoles()
 	resp_port = orig_port;
 	orig_port = tmp_port;

+	bool tmp_bool = saw_first_resp_packet;
+	saw_first_resp_packet = saw_first_orig_packet;
+	saw_first_orig_packet = tmp_bool;
+
+	uint32 tmp_flow = resp_flow_label;
+	resp_flow_label = orig_flow_label;
+	orig_flow_label = tmp_flow;
+
 	Unref(conn_val);
 	conn_val = 0;
@@ -882,3 +934,35 @@ void Connection::SetRootAnalyzer(TransportLayerAnalyzer* analyzer, PIA* pia)
 	root_analyzer = analyzer;
 	primary_PIA = pia;
 	}
+
+void Connection::CheckFlowLabel(bool is_orig, uint32 flow_label)
+	{
+	uint32& my_flow_label = is_orig ? orig_flow_label : resp_flow_label;
+
+	if ( my_flow_label != flow_label )
+		{
+		if ( conn_val )
+			{
+			RecordVal *endp = conn_val->Lookup(is_orig ? 1 : 2)->AsRecordVal();
+			endp->Assign(4, new Val(flow_label, TYPE_COUNT));
+			}
+
+		if ( connection_flow_label_changed &&
+		     (is_orig ? saw_first_orig_packet : saw_first_resp_packet) )
+			{
+			val_list* vl = new val_list(4);
+			vl->append(BuildConnVal());
+			vl->append(new Val(is_orig, TYPE_BOOL));
+			vl->append(new Val(my_flow_label, TYPE_COUNT));
+			vl->append(new Val(flow_label, TYPE_COUNT));
+			ConnectionEvent(connection_flow_label_changed, 0, vl);
+			}
+
+		my_flow_label = flow_label;
+		}
+
+	if ( is_orig )
+		saw_first_orig_packet = 1;
+	else
+		saw_first_resp_packet = 1;
+	}
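
The new CheckFlowLabel() only raises connection_flow_label_changed after a side's first packet has been seen, so the initial label is recorded silently rather than reported as a change. The control flow, reduced to a standalone toy with printf standing in for the event engine (struct and function names here are illustrative, not Bro's):

    #include <cstdint>
    #include <cstdio>

    struct Side { uint32_t label = 0; bool saw_first = false; };

    void check_flow_label(Side& s, uint32_t label)
    	{
    	if ( s.label != label )
    		{
    		if ( s.saw_first )	// only report changes after the first packet
    			printf("flow label changed: %u -> %u\n",
    			       (unsigned)s.label, (unsigned)label);
    		s.label = label;
    		}
    	s.saw_first = true;
    	}

    int main()
    	{
    	Side orig;
    	check_flow_label(orig, 7);	// first packet: records 7, no report
    	check_flow_label(orig, 7);	// unchanged: silent
    	check_flow_label(orig, 9);	// reports 7 -> 9
    	return 0;
    	}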

View file

@@ -13,6 +13,7 @@
 #include "RuleMatcher.h"
 #include "AnalyzerTags.h"
 #include "IPAddr.h"
+#include "TunnelEncapsulation.h"

 class Connection;
 class ConnectionTimer;
@@ -50,9 +51,17 @@ class Analyzer;

 class Connection : public BroObj {
 public:
-	Connection(NetSessions* s, HashKey* k, double t, const ConnID* id);
+	Connection(NetSessions* s, HashKey* k, double t, const ConnID* id,
+			uint32 flow, const EncapsulationStack* arg_encap);
 	virtual ~Connection();

+	// Invoked when an encapsulation is discovered. It records the
+	// encapsulation with the connection and raises a "tunnel_changed"
+	// event if it's different from the previous encapsulation (or the
+	// first encountered). encap can be null to indicate no
+	// encapsulation.
+	void CheckEncapsulation(const EncapsulationStack* encap);
+
 	// Invoked when connection is about to be removed. Use Ref(this)
 	// inside Done to keep the connection object around (though it'll
 	// no longer be accessible from the dictionary of active
@@ -241,6 +250,13 @@ public:
 	void SetUID(uint64 arg_uid)	 { uid = arg_uid; }

+	uint64 GetUID() const { return uid; }
+
+	const EncapsulationStack* GetEncapsulation() const
+		{ return encapsulation; }
+
+	void CheckFlowLabel(bool is_orig, uint32 flow_label);
+
 protected:

 	Connection()	{ persistent = 0; }
@@ -271,10 +287,12 @@ protected:
 	IPAddr resp_addr;
 	uint32 orig_port, resp_port;	// in network order
 	TransportProto proto;
+	uint32 orig_flow_label, resp_flow_label;	// most recent IPv6 flow labels
 	double start_time, last_time;
 	double inactivity_timeout;
 	RecordVal* conn_val;
 	LoginConn* login_conn;	// either nil, or this
+	const EncapsulationStack* encapsulation; // tunnels
 	int suppress_event;	// suppress certain events to once per conn.

 	unsigned int installed_status_timer:1;
@@ -286,6 +304,7 @@ protected:
 	unsigned int record_packets:1, record_contents:1;
 	unsigned int persistent:1;
 	unsigned int record_current_packet:1, record_current_content:1;
+	unsigned int saw_first_orig_packet:1, saw_first_resp_packet:1;

 	// Count number of connections.
 	static unsigned int total_connections;
View file

@ -137,7 +137,7 @@ static bool is_mapped_dce_rpc_endpoint(const dce_rpc_endpoint_addr& addr)
bool is_mapped_dce_rpc_endpoint(const ConnID* id, TransportProto proto) bool is_mapped_dce_rpc_endpoint(const ConnID* id, TransportProto proto)
{ {
if ( id->dst_addr.GetFamily() == IPAddr::IPv6 ) if ( id->dst_addr.GetFamily() == IPv6 )
// TODO: Does the protocol support v6 addresses? #773 // TODO: Does the protocol support v6 addresses? #773
return false; return false;
@ -414,7 +414,7 @@ void DCE_RPC_Session::DeliverEpmapperMapResponse(
case binpac::DCE_RPC_Simple::EPM_PROTOCOL_IP: case binpac::DCE_RPC_Simple::EPM_PROTOCOL_IP:
uint32 hostip = floor->rhs()->data()->ip(); uint32 hostip = floor->rhs()->data()->ip();
mapped.addr.addr = IPAddr(IPAddr::IPv4, &hostip, IPAddr::Host); mapped.addr.addr = IPAddr(IPv4, &hostip, IPAddr::Host);
break; break;
} }
} }

View file

@@ -2,9 +2,10 @@

 #include "config.h"

+#include <openssl/md5.h>
+
 #include "EquivClass.h"
 #include "DFA.h"
-#include "md5.h"

 int dfa_state_cache_size = 10000;

@@ -312,8 +313,8 @@ DFA_State* DFA_State_Cache::Lookup(const NFA_state_list& nfas,
 	{
 	// We assume that state ID's don't exceed 10 digits, plus
 	// we allow one more character for the delimiter.
-	md5_byte_t id_tag[nfas.length() * 11 + 1];
-	md5_byte_t* p = id_tag;
+	u_char id_tag[nfas.length() * 11 + 1];
+	u_char* p = id_tag;

 	for ( int i = 0; i < nfas.length(); ++i )
 		{
@@ -335,12 +336,8 @@ DFA_State* DFA_State_Cache::Lookup(const NFA_state_list& nfas,

 	// We use the short MD5 instead of the full string for the
 	// HashKey because the data is copied into the key.
-	md5_state_t state;
-	md5_byte_t digest[16];
-	md5_init(&state);
-	md5_append(&state, id_tag, p - id_tag);
-	md5_finish(&state, digest);
+	u_char digest[16];
+	MD5(id_tag, p - id_tag, digest);

 	*hash = new HashKey(&digest, sizeof(digest));
 	CacheEntry* e = states.Lookup(*hash);
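
The state-cache hashing above switches from the bundled md5.c API (md5_init/md5_append/md5_finish) to OpenSSL's one-shot MD5() helper, which digests a buffer in a single call. A self-contained example of that call, compiled with -lcrypto (the input bytes are made up for illustration):

    #include <openssl/md5.h>
    #include <cstdio>
    #include <cstring>

    int main()
    	{
    	const unsigned char data[] = "0:1:2";	// e.g. a delimited NFA state ID tag
    	unsigned char digest[MD5_DIGEST_LENGTH];	// 16 bytes

    	// One call replaces the md5_init/md5_append/md5_finish sequence.
    	MD5(data, strlen((const char*)data), digest);

    	for ( int i = 0; i < MD5_DIGEST_LENGTH; ++i )
    		printf("%02x", digest[i]);
    	printf("\n");
    	return 0;
    	}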

View file

@@ -63,10 +63,10 @@ void DNS_TCP_Analyzer_binpac::Done()
 	interp->FlowEOF(false);
 	}

-void DNS_TCP_Analyzer_binpac::EndpointEOF(TCP_Reassembler* endp)
+void DNS_TCP_Analyzer_binpac::EndpointEOF(bool is_orig)
 	{
-	TCP_ApplicationAnalyzer::EndpointEOF(endp);
-	interp->FlowEOF(endp->IsOrig());
+	TCP_ApplicationAnalyzer::EndpointEOF(is_orig);
+	interp->FlowEOF(is_orig);
 	}

 void DNS_TCP_Analyzer_binpac::DeliverStream(int len, const u_char* data,

View file

@@ -45,7 +45,7 @@ public:
 	virtual void Done();
 	virtual void DeliverStream(int len, const u_char* data, bool orig);
 	virtual void Undelivered(int seq, int len, bool orig);
-	virtual void EndpointEOF(TCP_Reassembler* endp);
+	virtual void EndpointEOF(bool is_orig);

 	static Analyzer* InstantiateAnalyzer(Connection* conn)
 		{ return new DNS_TCP_Analyzer_binpac(conn); }

View file

@@ -321,10 +321,10 @@ void DNS_Mapping::Init(struct hostent* h)
 		addrs = new IPAddr[num_addrs];
 		for ( int i = 0; i < num_addrs; ++i )
 			if ( h->h_addrtype == AF_INET )
-				addrs[i] = IPAddr(IPAddr::IPv4, (uint32*)h->h_addr_list[i],
+				addrs[i] = IPAddr(IPv4, (uint32*)h->h_addr_list[i],
 						IPAddr::Network);
 			else if ( h->h_addrtype == AF_INET6 )
-				addrs[i] = IPAddr(IPAddr::IPv6, (uint32*)h->h_addr_list[i],
+				addrs[i] = IPAddr(IPv6, (uint32*)h->h_addr_list[i],
 						IPAddr::Network);
 		}
 	else
@@ -693,7 +693,7 @@ Val* DNS_Mgr::BuildMappingVal(DNS_Mapping* dm)
 void DNS_Mgr::AddResult(DNS_Mgr_Request* dr, struct nb_dns_result* r)
 	{
 	struct hostent* h = (r && r->host_errno == 0) ? r->hostent : 0;
-	u_int32_t ttl = r->ttl;
+	u_int32_t ttl = (r && r->host_errno == 0) ? r->ttl : 0;

 	DNS_Mapping* new_dm;
 	DNS_Mapping* prev_dm;
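
The old line read r->ttl unconditionally, dereferencing r in exactly the failure cases where the hostent pointer had already been forced to null. The fix applies the same validity test to both reads; the pattern, reduced to a standalone toy:

    #include <cstdint>
    #include <cstdio>

    struct result { int host_errno; uint32_t ttl; };

    int main()
    	{
    	const result* r = nullptr;	// simulate a failed lookup

    	// Guard every field read with the same validity test, not just one.
    	uint32_t ttl = (r && r->host_errno == 0) ? r->ttl : 0;

    	printf("ttl=%u\n", (unsigned)ttl);
    	return 0;
    	}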

View file

@@ -74,7 +74,7 @@ void DPM::PostScriptInit()

 void DPM::AddConfig(const Analyzer::Config& cfg)
 	{
-#ifdef USE_PERFTOOLS
+#ifdef USE_PERFTOOLS_DEBUG
 	HeapLeakChecker::Disabler disabler;
 #endif

@@ -185,46 +185,8 @@ bool DPM::BuildInitialAnalyzerTree(TransportProto proto, Connection* conn,
 		break;

 	case TRANSPORT_ICMP: {
-		const struct icmp* icmpp = (const struct icmp *) data;
-		switch ( icmpp->icmp_type ) {
-
-		case ICMP_ECHO:
-		case ICMP_ECHOREPLY:
-			if ( ICMP_Echo_Analyzer::Available() )
-				{
-				root = icmp = new ICMP_Echo_Analyzer(conn);
-				DBG_DPD(conn, "activated ICMP Echo analyzer");
-				}
-			break;
-
-		case ICMP_REDIRECT:
-			if ( ICMP_Redir_Analyzer::Available() )
-				{
-				root = new ICMP_Redir_Analyzer(conn);
-				DBG_DPD(conn, "activated ICMP Redir analyzer");
-				}
-			break;
-
-		case ICMP_UNREACH:
-			if ( ICMP_Unreachable_Analyzer::Available() )
-				{
-				root = icmp = new ICMP_Unreachable_Analyzer(conn);
-				DBG_DPD(conn, "activated ICMP Unreachable analyzer");
-				}
-			break;
-
-		case ICMP_TIMXCEED:
-			if ( ICMP_TimeExceeded_Analyzer::Available() )
-				{
-				root = icmp = new ICMP_TimeExceeded_Analyzer(conn);
-				DBG_DPD(conn, "activated ICMP Time Exceeded analyzer");
-				}
-			break;
-		}
-
-		if ( ! root )
-			root = icmp = new ICMP_Analyzer(conn);
+		root = icmp = new ICMP_Analyzer(conn);
+		DBG_DPD(conn, "activated ICMP analyzer");

 		analyzed = true;
 		break;
 		}

View file

@@ -721,7 +721,6 @@ static char* get_prompt(bool reset_counter = false)

 string get_context_description(const Stmt* stmt, const Frame* frame)
 	{
-	char buf[1024];
 	ODesc d;
 	const BroFunc* func = frame->GetFunction();

@@ -739,10 +738,14 @@ string get_context_description(const Stmt* stmt, const Frame* frame)
 		loc.last_line = 0;
 		}

-	safe_snprintf(buf, sizeof(buf), "In %s at %s:%d",
+	size_t buf_size = strlen(d.Description()) + strlen(loc.filename) + 1024;
+	char* buf = new char[buf_size];
+	safe_snprintf(buf, buf_size, "In %s at %s:%d",
 		      d.Description(), loc.filename, loc.last_line);

-	return string(buf);
+	string retval(buf);
+	delete [] buf;
+	return retval;
 	}

 int dbg_handle_debug_input()
@@ -924,6 +927,8 @@ bool post_execute_stmt(Stmt* stmt, Frame* f, Val* result, stmt_flow_type* flow)
 // Evaluates the given expression in the context of the currently selected
 // frame. Returns the resulting value, or nil if none (or there was an error).
 Expr* g_curr_debug_expr = 0;
+const char* g_curr_debug_error = 0;
+bool in_debug = false;

 // ### fix this hardwired access to external variables etc.
 struct yy_buffer_state;
@@ -969,6 +974,11 @@ Val* dbg_eval_expr(const char* expr)
 	Val* result = 0;
 	if ( yyparse() )
 		{
+		if ( g_curr_debug_error )
+			debug_msg("Parsing expression '%s' failed: %s\n", expr, g_curr_debug_error);
+		else
+			debug_msg("Parsing expression '%s' failed\n", expr);
+
 		if ( g_curr_debug_expr )
 			{
 			delete g_curr_debug_expr;
@@ -983,6 +993,9 @@ Val* dbg_eval_expr(const char* expr)
 	delete g_curr_debug_expr;
 	g_curr_debug_expr = 0;
+	delete [] g_curr_debug_error;
+	g_curr_debug_error = 0;
+	in_debug = false;

 	return result;
 	}
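
The get_context_description() change drops a fixed 1024-byte stack buffer that could truncate on long function descriptions, sizing the allocation from its inputs instead. The same pattern in isolation (plain snprintf here; safe_snprintf is Bro's own wrapper):

    #include <cstdio>
    #include <cstring>
    #include <string>

    std::string context_line(const char* desc, const char* file, int line)
    	{
    	// Size the buffer from its inputs instead of guessing a fixed cap.
    	size_t buf_size = strlen(desc) + strlen(file) + 64;
    	char* buf = new char[buf_size];
    	snprintf(buf, buf_size, "In %s at %s:%d", desc, file, line);
    	std::string retval(buf);
    	delete [] buf;
    	return retval;
    	}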

View file

@@ -553,7 +553,8 @@ int dbg_cmd_print(DebugCmd cmd, const vector<string>& args)
 	for ( int i = 0; i < int(args.size()); ++i )
 		{
 		expr += args[i];
-		expr += " ";
+		if ( i < int(args.size()) - 1 )
+			expr += " ";
 		}

 	Val* val = dbg_eval_expr(expr.c_str());
@@ -566,8 +567,7 @@ int dbg_cmd_print(DebugCmd cmd, const vector<string>& args)
 		}
 	else
 		{
-		// ### Print something?
-		// debug_msg("<expression has no value>\n");
+		debug_msg("<expression has no value>\n");
 		}

 	return 1;
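
The dbg_cmd_print() fix is the classic separator-join: append the delimiter only between tokens, never after the last one, so the reassembled expression carries no trailing blank. A standalone version:

    #include <iostream>
    #include <string>
    #include <vector>

    int main()
    	{
    	std::vector<std::string> args = { "1", "+", "2" };	// example tokens
    	std::string expr;

    	for ( size_t i = 0; i < args.size(); ++i )
    		{
    		expr += args[i];
    		if ( i < args.size() - 1 )	// separator only *between* tokens
    			expr += " ";
    		}

    	std::cout << '[' << expr << "]\n";	// prints [1 + 2], no trailing space
    	return 0;
    	}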

View file

@@ -15,7 +15,8 @@ DebugLogger::Stream DebugLogger::streams[NUM_DBGS] = {
 	{ "compressor", 0, false }, {"string", 0, false },
 	{ "notifiers", 0, false }, { "main-loop", 0, false },
 	{ "dpd", 0, false }, { "tm", 0, false },
-	{ "logging", 0, false }
+	{ "logging", 0, false }, {"input", 0, false },
+	{ "threading", 0, false }
 };

 DebugLogger::DebugLogger(const char* filename)

View file

@@ -24,6 +24,8 @@ enum DebugStream {
 	DBG_DPD,	// Dynamic application detection framework
 	DBG_TM,		// Time-machine packet input via Brocolli
 	DBG_LOGGING,	// Logging streams
+	DBG_INPUT,	// Input streams
+	DBG_THREADING,	// Threading system

 	NUM_DBGS	// Has to be last
 };
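
DBG_INPUT and DBG_THREADING must be mirrored by two new rows in DebugLogger.cc's streams[NUM_DBGS] table (previous hunk); the enum and the array stay in sync only by convention. A sketch of a compile-time guard for that invariant; static_assert is C++11, which the Bro tree of this era did not assume, so this is purely illustrative:

    enum DebugStream { DBG_SERIAL, DBG_LOGGING, DBG_INPUT, DBG_THREADING, NUM_DBGS };

    struct Stream { const char* prefix; int indent; bool enabled; };

    static const Stream streams[] = {
    	{ "serial", 0, false }, { "logging", 0, false },
    	{ "input", 0, false }, { "threading", 0, false },
    };

    // Fails to compile if someone extends the enum but forgets the table.
    static_assert(sizeof(streams) / sizeof(streams[0]) == NUM_DBGS,
                  "streams[] out of sync with DebugStream enum");

    int main() { return 0; }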

View file

@@ -157,6 +157,16 @@ void ODesc::Add(double d)
 		}
 	}

+void ODesc::Add(const IPAddr& addr)
+	{
+	Add(addr.AsString());
+	}
+
+void ODesc::Add(const IPPrefix& prefix)
+	{
+	Add(prefix.AsString());
+	}
+
 void ODesc::AddCS(const char* s)
 	{
 	int n = strlen(s);

View file

@@ -8,7 +8,6 @@
 #include <utility>

 #include "BroString.h"
-#include "IPAddr.h"

 typedef enum {
 	DESC_READABLE,
@@ -23,6 +22,8 @@ typedef enum {
 } desc_style;

 class BroFile;
+class IPAddr;
+class IPPrefix;

 class ODesc {
 public:
@@ -76,8 +77,8 @@ public:
 	void Add(int64 i);
 	void Add(uint64 u);
 	void Add(double d);
-	void Add(const IPAddr& addr)	{ Add(addr.AsString()); }
-	void Add(const IPPrefix& prefix)	{ Add(prefix.AsString()); }
+	void Add(const IPAddr& addr);
+	void Add(const IPPrefix& prefix);

 	// Add s as a counted string.
 	void AddCS(const char* s);
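
Desc.h now forward-declares IPAddr and IPPrefix and moves the two Add() bodies into Desc.cc: declaring a function that takes a const reference needs only an incomplete type, so the header no longer drags in IPAddr.h and everything that includes it. The technique in a single self-contained file (toy names; comments mark the notional header/source split):

    // --- widget.h (sketch): a forward declaration suffices to declare
    // --- functions taking the type by reference or pointer.
    class Gadget;

    class Widget {
    public:
    	void Add(const Gadget& g);	// body deferred to the .cc file
    };

    // --- gadget.h (sketch)
    class Gadget { public: int id = 0; };

    // --- widget.cc (sketch): only here is the complete type needed.
    void Widget::Add(const Gadget& g) { (void)g.id; }

    int main()
    	{
    	Widget w;
    	Gadget g;
    	w.Add(g);
    	return 0;
    	}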

View file

@@ -10,11 +10,6 @@

 Discarder::Discarder()
 	{
-	ip_hdr = internal_type("ip_hdr")->AsRecordType();
-	tcp_hdr = internal_type("tcp_hdr")->AsRecordType();
-	udp_hdr = internal_type("udp_hdr")->AsRecordType();
-	icmp_hdr = internal_type("icmp_hdr")->AsRecordType();
-
 	check_ip = internal_func("discarder_check_ip");
 	check_tcp = internal_func("discarder_check_tcp");
 	check_udp = internal_func("discarder_check_udp");
@@ -36,12 +31,10 @@ int Discarder::NextPacket(const IP_Hdr* ip, int len, int caplen)
 	{
 	int discard_packet = 0;

-	const struct ip* ip4 = ip->IP4_Hdr();
-
 	if ( check_ip )
 		{
 		val_list* args = new val_list;
-		args->append(BuildHeader(ip4));
+		args->append(ip->BuildPktHdrVal());

 		try
 			{
@@ -59,19 +52,18 @@ int Discarder::NextPacket(const IP_Hdr* ip, int len, int caplen)
 		return discard_packet;
 		}

-	int proto = ip4->ip_p;
+	int proto = ip->NextProto();
+
 	if ( proto != IPPROTO_TCP && proto != IPPROTO_UDP &&
 	     proto != IPPROTO_ICMP )
 		// This is not a protocol we understand.
 		return 0;

 	// XXX shall we only check the first packet???
-	uint32 frag_field = ntohs(ip4->ip_off);
-
-	if ( (frag_field & 0x3fff) != 0 )
+	if ( ip->IsFragment() )
 		// Never check any fragment.
 		return 0;

-	int ip_hdr_len = ip4->ip_hl * 4;
+	int ip_hdr_len = ip->HdrLen();
 	len -= ip_hdr_len;	// remove IP header
 	caplen -= ip_hdr_len;

@@ -87,7 +79,7 @@ int Discarder::NextPacket(const IP_Hdr* ip, int len, int caplen)
 	// Where the data starts - if this is a protocol we know about,
 	// this gets advanced past the transport header.
-	const u_char* data = ((u_char*) ip4 + ip_hdr_len);
+	const u_char* data = ip->Payload();

 	if ( is_tcp )
 		{
@@ -97,8 +89,7 @@ int Discarder::NextPacket(const IP_Hdr* ip, int len, int caplen)
 		int th_len = tp->th_off * 4;

 		val_list* args = new val_list;
-		args->append(BuildHeader(ip4));
-		args->append(BuildHeader(tp, len));
+		args->append(ip->BuildPktHdrVal());
 		args->append(BuildData(data, th_len, len, caplen));

 		try
@@ -123,8 +114,7 @@ int Discarder::NextPacket(const IP_Hdr* ip, int len, int caplen)
 		int uh_len = sizeof (struct udphdr);

 		val_list* args = new val_list;
-		args->append(BuildHeader(ip4));
-		args->append(BuildHeader(up));
+		args->append(ip->BuildPktHdrVal());
 		args->append(BuildData(data, uh_len, len, caplen));

 		try
@@ -148,8 +138,7 @@ int Discarder::NextPacket(const IP_Hdr* ip, int len, int caplen)
 		const struct icmp* ih = (const struct icmp*) data;

 		val_list* args = new val_list;
-		args->append(BuildHeader(ip4));
-		args->append(BuildHeader(ih));
+		args->append(ip->BuildPktHdrVal());

 		try
 			{
@@ -168,62 +157,6 @@ int Discarder::NextPacket(const IP_Hdr* ip, int len, int caplen)
 	return discard_packet;
 	}

-Val* Discarder::BuildHeader(const struct ip* ip)
-	{
-	RecordVal* hdr = new RecordVal(ip_hdr);
-
-	hdr->Assign(0, new Val(ip->ip_hl * 4, TYPE_COUNT));
-	hdr->Assign(1, new Val(ip->ip_tos, TYPE_COUNT));
-	hdr->Assign(2, new Val(ntohs(ip->ip_len), TYPE_COUNT));
-	hdr->Assign(3, new Val(ntohs(ip->ip_id), TYPE_COUNT));
-	hdr->Assign(4, new Val(ip->ip_ttl, TYPE_COUNT));
-	hdr->Assign(5, new Val(ip->ip_p, TYPE_COUNT));
-	hdr->Assign(6, new AddrVal(ip->ip_src.s_addr));
-	hdr->Assign(7, new AddrVal(ip->ip_dst.s_addr));
-
-	return hdr;
-	}
-
-Val* Discarder::BuildHeader(const struct tcphdr* tp, int tcp_len)
-	{
-	RecordVal* hdr = new RecordVal(tcp_hdr);
-
-	hdr->Assign(0, new PortVal(ntohs(tp->th_sport), TRANSPORT_TCP));
-	hdr->Assign(1, new PortVal(ntohs(tp->th_dport), TRANSPORT_TCP));
-	hdr->Assign(2, new Val(uint32(ntohl(tp->th_seq)), TYPE_COUNT));
-	hdr->Assign(3, new Val(uint32(ntohl(tp->th_ack)), TYPE_COUNT));
-
-	int tcp_hdr_len = tp->th_off * 4;
-	hdr->Assign(4, new Val(tcp_hdr_len, TYPE_COUNT));
-	hdr->Assign(5, new Val(tcp_len - tcp_hdr_len, TYPE_COUNT));
-	hdr->Assign(6, new Val(tp->th_flags, TYPE_COUNT));
-	hdr->Assign(7, new Val(ntohs(tp->th_win), TYPE_COUNT));
-
-	return hdr;
-	}
-
-Val* Discarder::BuildHeader(const struct udphdr* up)
-	{
-	RecordVal* hdr = new RecordVal(udp_hdr);
-
-	hdr->Assign(0, new PortVal(ntohs(up->uh_sport), TRANSPORT_UDP));
-	hdr->Assign(1, new PortVal(ntohs(up->uh_dport), TRANSPORT_UDP));
-	hdr->Assign(2, new Val(ntohs(up->uh_ulen), TYPE_COUNT));
-
-	return hdr;
-	}
-
-Val* Discarder::BuildHeader(const struct icmp* icmp)
-	{
-	RecordVal* hdr = new RecordVal(icmp_hdr);
-
-	hdr->Assign(0, new Val(icmp->icmp_type, TYPE_COUNT));
-
-	return hdr;
-	}
-
 Val* Discarder::BuildData(const u_char* data, int hdrlen, int len, int caplen)
 	{
 	len -= hdrlen;
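
Discarder now asks the IP_Hdr object (NextProto(), IsFragment(), HdrLen(), Payload(), BuildPktHdrVal()) instead of casting to struct ip, which is what lets one code path serve both IPv4 and IPv6. A toy interface sketching that shape; this is not Bro's actual class, just the idea:

    #include <cstdint>

    // Illustrative only: the point is that callers query the header
    // object rather than assuming an IPv4 struct ip layout.
    class PktHdr {
    public:
    	virtual ~PktHdr() { }
    	virtual int NextProto() const = 0;	// transport protocol number
    	virtual bool IsFragment() const = 0;	// any fragment, v4 or v6
    	virtual int HdrLen() const = 0;	// total IP header length
    	virtual const uint8_t* Payload() const = 0;	// start of transport header
    };

    int main() { return 0; }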

Some files were not shown because too many files have changed in this diff