Merge remote-tracking branch 'origin/master' into topic/vladg/ssh

Conflicts:
	src/analyzer/protocol/CMakeLists.txt
	src/analyzer/protocol/ssh/Plugin.cc
	src/analyzer/protocol/ssh/SSH.h
Vlad Grigorescu 2014-12-27 17:22:26 -06:00
commit fa98aee0a7
954 changed files with 128195 additions and 90795 deletions

.gitmodules | 6

@@ -16,9 +16,9 @@
 [submodule "cmake"]
 	path = cmake
 	url = git://git.bro.org/cmake
-[submodule "magic"]
-	path = magic
-	url = git://git.bro.org/bromagic
 [submodule "src/3rdparty"]
 	path = src/3rdparty
 	url = git://git.bro.org/bro-3rdparty
+[submodule "aux/plugins"]
+	path = aux/plugins
+	url = git://git.bro.org/bro-plugins

CHANGES | 1073

File diff suppressed because it is too large.

CMakeLists.txt

@@ -1,5 +1,9 @@
 project(Bro C CXX)
-cmake_minimum_required(VERSION 2.8.0 FATAL_ERROR)
+# When changing the minimum version here, also adapt
+# aux/bro-aux/plugin-support/skeleton/CMakeLists.txt
+cmake_minimum_required(VERSION 2.6.3 FATAL_ERROR)
 
 include(cmake/CommonCMakeConfig.cmake)
 
 ########################################################################
@@ -11,22 +15,28 @@ if (NOT BRO_SCRIPT_INSTALL_PATH)
     set(BRO_SCRIPT_INSTALL_PATH ${BRO_ROOT_DIR}/share/bro)
 endif ()
 
+if (NOT BRO_MAN_INSTALL_PATH)
+    # set the default Bro man page installation path (user did not specify one)
+    set(BRO_MAN_INSTALL_PATH ${BRO_ROOT_DIR}/share/man)
+endif ()
+
 # sanitize the Bro script install directory into an absolute path
 # (CMake is confused by ~ as a representation of home directory)
 get_filename_component(BRO_SCRIPT_INSTALL_PATH ${BRO_SCRIPT_INSTALL_PATH}
                        ABSOLUTE)
 
-set(BRO_MAGIC_INSTALL_PATH ${BRO_ROOT_DIR}/share/bro/magic)
-set(BRO_MAGIC_SOURCE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/magic/database)
+set(BRO_PLUGIN_INSTALL_PATH ${BRO_ROOT_DIR}/lib/bro/plugins CACHE STRING "Installation path for plugins" FORCE)
 
 configure_file(bro-path-dev.in ${CMAKE_CURRENT_BINARY_DIR}/bro-path-dev)
 
 file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/bro-path-dev.sh
      "export BROPATH=`${CMAKE_CURRENT_BINARY_DIR}/bro-path-dev`\n"
-     "export BROMAGIC=\"${BRO_MAGIC_SOURCE_PATH}\"\n"
+     "export BRO_PLUGIN_PATH=\"${CMAKE_CURRENT_BINARY_DIR}/src:${BRO_PLUGIN_INSTALL_PATH}\"\n"
      "export PATH=\"${CMAKE_CURRENT_BINARY_DIR}/src\":$PATH\n")
 
 file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/bro-path-dev.csh
      "setenv BROPATH `${CMAKE_CURRENT_BINARY_DIR}/bro-path-dev`\n"
-     "setenv BROMAGIC \"${BRO_MAGIC_SOURCE_PATH}\"\n"
+     "setenv BRO_PLUGIN_PATH \"${CMAKE_CURRENT_BINARY_DIR}/src:${BRO_PLUGIN_INSTALL_PATH}\"\n"
      "setenv PATH \"${CMAKE_CURRENT_BINARY_DIR}/src\":$PATH\n")
 
 file(STRINGS "${CMAKE_CURRENT_SOURCE_DIR}/VERSION" VERSION LIMIT_COUNT 1)
@@ -39,32 +49,6 @@ set(VERSION_MAJ_MIN "${VERSION_MAJOR}.${VERSION_MINOR}")
 ########################################################################
 ## Dependency Configuration
 
-include(ExternalProject)
-
-# LOG_* options to ExternalProject_Add appear in CMake 2.8.3. If
-# available, using them hides external project configure/build output.
-if("${CMAKE_VERSION}" VERSION_GREATER 2.8.2)
-    set(EXTERNAL_PROJECT_LOG_OPTIONS
-        LOG_DOWNLOAD 1 LOG_UPDATE 1 LOG_CONFIGURE 1 LOG_BUILD 1 LOG_INSTALL 1)
-else()
-    set(EXTERNAL_PROJECT_LOG_OPTIONS)
-endif()
-
-set(LIBMAGIC_PREFIX ${CMAKE_CURRENT_BINARY_DIR}/libmagic-prefix)
-set(LIBMAGIC_INCLUDE_DIR ${LIBMAGIC_PREFIX}/include)
-set(LIBMAGIC_LIB_DIR ${LIBMAGIC_PREFIX}/lib)
-set(LIBMAGIC_LIBRARY ${LIBMAGIC_LIB_DIR}/libmagic.a)
-ExternalProject_Add(libmagic
-    PREFIX ${LIBMAGIC_PREFIX}
-    URL ${CMAKE_CURRENT_SOURCE_DIR}/src/3rdparty/file-5.17.tar.gz
-    CONFIGURE_COMMAND ./configure --enable-static --disable-shared
-                      --prefix=${LIBMAGIC_PREFIX}
-                      --includedir=${LIBMAGIC_INCLUDE_DIR}
-                      --libdir=${LIBMAGIC_LIB_DIR}
-    BUILD_IN_SOURCE 1
-    ${EXTERNAL_PROJECT_LOG_OPTIONS}
-)
-
 include(FindRequiredPackage)
 
 # Check cache value first to avoid displaying "Found sed" messages everytime
@@ -107,7 +91,6 @@ include_directories(BEFORE
     ${OpenSSL_INCLUDE_DIR}
     ${BIND_INCLUDE_DIR}
     ${BinPAC_INCLUDE_DIR}
-    ${LIBMAGIC_INCLUDE_DIR}
     ${ZLIB_INCLUDE_DIR}
     ${JEMALLOC_INCLUDE_DIR}
 )
@@ -149,33 +132,6 @@ if (GOOGLEPERFTOOLS_FOUND)
     endif ()
 endif ()
 
-set(USE_DATASERIES false)
-find_package(Lintel)
-find_package(DataSeries)
-find_package(LibXML2)
-
-if (NOT DISABLE_DATASERIES AND
-    LINTEL_FOUND AND DATASERIES_FOUND AND LIBXML2_FOUND)
-    set(USE_DATASERIES true)
-    include_directories(BEFORE ${Lintel_INCLUDE_DIR})
-    include_directories(BEFORE ${DataSeries_INCLUDE_DIR})
-    include_directories(BEFORE ${LibXML2_INCLUDE_DIR})
-    list(APPEND OPTLIBS ${Lintel_LIBRARIES})
-    list(APPEND OPTLIBS ${DataSeries_LIBRARIES})
-    list(APPEND OPTLIBS ${LibXML2_LIBRARIES})
-endif()
-
-set(USE_ELASTICSEARCH false)
-set(USE_CURL false)
-find_package(LibCURL)
-
-if (NOT DISABLE_ELASTICSEARCH AND LIBCURL_FOUND)
-    set(USE_ELASTICSEARCH true)
-    set(USE_CURL true)
-    include_directories(BEFORE ${LibCURL_INCLUDE_DIR})
-    list(APPEND OPTLIBS ${LibCURL_LIBRARIES})
-endif()
-
 if (ENABLE_PERFTOOLS_DEBUG OR ENABLE_PERFTOOLS)
     # Just a no op to prevent CMake from complaining about manually-specified
     # ENABLE_PERFTOOLS_DEBUG or ENABLE_PERFTOOLS not being used if google
@@ -187,7 +143,6 @@ set(brodeps
     ${PCAP_LIBRARY}
     ${OpenSSL_LIBRARIES}
     ${BIND_LIBRARY}
-    ${LIBMAGIC_LIBRARY}
     ${ZLIB_LIBRARY}
     ${JEMALLOC_LIBRARIES}
     ${OPTLIBS}
@@ -198,6 +153,8 @@ set(brodeps
 include(TestBigEndian)
 test_big_endian(WORDS_BIGENDIAN)
 
+include(CheckSymbolExists)
+check_symbol_exists(htonll arpa/inet.h HAVE_BYTEORDER_64)
+
 include(OSSpecific)
 include(CheckTypes)
@@ -207,6 +164,10 @@ include(MiscTests)
 include(PCAPTests)
 include(OpenSSLTests)
 include(CheckNameserCompat)
+include(GetArchitecture)
+
+# Tell the plugin code that we're building as part of the main tree.
+set(BRO_PLUGIN_INTERNAL_BUILD true CACHE INTERNAL "" FORCE)
 
 configure_file(${CMAKE_CURRENT_SOURCE_DIR}/config.h.in
                ${CMAKE_CURRENT_BINARY_DIR}/config.h)
@@ -219,6 +180,7 @@ include_directories(${CMAKE_CURRENT_BINARY_DIR})
 add_subdirectory(src)
 add_subdirectory(scripts)
 add_subdirectory(doc)
+add_subdirectory(man)
 
 include(CheckOptionalBuildSources)
@@ -226,10 +188,6 @@ CheckOptionalBuildSources(aux/broctl Broctl INSTALL_BROCTL)
 CheckOptionalBuildSources(aux/bro-aux Bro-Aux INSTALL_AUX_TOOLS)
 CheckOptionalBuildSources(aux/broccoli Broccoli INSTALL_BROCCOLI)
 
-install(DIRECTORY ./magic/database/
-        DESTINATION ${BRO_MAGIC_INSTALL_PATH}
-)
-
 ########################################################################
 ## Packaging Setup
@@ -275,10 +233,6 @@ message(
     "\n tcmalloc: ${USE_PERFTOOLS_TCMALLOC}"
     "\n debugging: ${USE_PERFTOOLS_DEBUG}"
     "\njemalloc: ${ENABLE_JEMALLOC}"
-    "\ncURL: ${USE_CURL}"
-    "\n"
-    "\nDataSeries: ${USE_DATASERIES}"
-    "\nElasticSearch: ${USE_ELASTICSEARCH}"
     "\n"
     "\n================================================================\n"
 )

Makefile

@@ -48,6 +48,7 @@ bindist:
 distclean:
 	rm -rf $(BUILD)
+	$(MAKE) -C testing $@
 
 test:
 	@( cd testing && make )
@@ -55,6 +56,8 @@ test:
 test-all: test
 	test -d aux/broctl && ( cd aux/broctl && make test )
 	test -d aux/btest && ( cd aux/btest && make test )
+	test -d aux/bro-aux && ( cd aux/bro-aux && make test )
+	test -d aux/plugins && ( cd aux/plugins && make test-all )
 
 configured:
 	@test -d $(BUILD) || ( echo "Error: No build/ directory found. Did you run configure?" && exit 1 )

NEWS | 137

@@ -4,18 +4,42 @@ release. For an exhaustive list of changes, see the ``CHANGES`` file
 (note that submodules, such as BroControl and Broccoli, come with
 their own ``CHANGES``.)
 
-Bro 2.3
-=======
-
-[In progress]
+Bro 2.4 (in progress)
+=====================
 
 Dependencies
 ------------
 
-- Bro no longer requires a pre-installed libmagic (because it now
-  ships its own).
-
-- Compiling from source now needs a CMake version >= 2.8.0.
+New Functionality
+-----------------
+
+- Bro now has support for external plugins that can extend its core
+  functionality, like protocol/file analysis, via shared libraries.
+  Plugins can be developed and distributed externally, and will be
+  pulled in dynamically at startup. Currently, a plugin can provide
+  custom protocol analyzers, file analyzers, log writers[TODO], input
+  readers[TODO], packet sources[TODO], and new built-in functions. A
+  plugin can furthermore hook into Bro's processing at a number of places
+  to add custom logic.
+
+  See https://www.bro.org/sphinx-git/devel/plugins.html for more
+  information on writing plugins.
+
+- Bro now has support for the MySQL wire protocol. Activity gets
+  logged into mysql.log.
+
+Changed Functionality
+---------------------
+
+- bro-cut has been rewritten in C, and is hence much faster.
+
+
+Bro 2.3
+=======
+
+Dependencies
+------------
+
+- Libmagic is no longer a dependency.
 
 New Functionality
 -----------------
@@ -28,6 +52,51 @@ New Functionality
 - The DNS analyzer now actually generates the dns_SRV_reply() event.
   It had been documented before, yet was never raised.
 
+- Bro now uses "file magic signatures" to identify file types. These
+  are defined via two new constructs in the signature rule parsing
+  grammar: "file-magic" gives a regular expression to match against,
+  and "file-mime" gives the MIME type string of content that matches
+  the magic and an optional strength value for the match. (See also
+  "Changed Functionality" below for changes due to switching from
+  using libmagic to such signatures.)
+
+- A new built-in function, "file_magic", can be used to get all file
+  magic matches and their corresponding strength against a given chunk
+  of data.
+
+- The SSL analyzer now supports heartbeats as well as a few
+  extensions, including server_name, alpn, and ec-curves.
+
+- The SSL analyzer comes with a Heartbleed detector script in
+  protocols/ssl/heartbleed.bro. Note that loading this script changes
+  the default value of "SSL::disable_analyzer_after_detection" from true
+  to false to prevent encrypted heartbeats from being ignored.
+
+- StartTLS is now supported for SMTP and POP3.
+
+- The X509 analyzer can now perform OCSP validation.
+
+- Bro now has analyzers for SNMP and Radius, which produce corresponding
+  snmp.log and radius.log output (as well as various events of course).
+
+- BroControl has a new option "BroPort" which allows a user to specify
+  the starting port number for Bro.
+
+- BroControl has a new option "StatsLogExpireInterval" which allows a
+  user to specify when entries in the stats.log file expire.
+
+- BroControl has a new option "PFRINGClusterType" which allows a user
+  to specify a PF_RING cluster type.
+
+- BroControl now supports PF_RING+DNA. There is also a new option
+  "PFRINGFirstAppInstance" that allows a user to specify the starting
+  application instance number for processes running on a DNA cluster.
+  See the BroControl documentation for more details.
+
+- BroControl now warns a user to run "broctl install" if Bro has
+  been upgraded or if the broctl or node configuration has changed
+  since the most recent install.
 
 Changed Functionality
 ---------------------
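
A minimal sketch of calling the new "file_magic" bif listed above; the exact
record field names for the returned matches (``mime`` and ``strength``) are an
assumption here::

    event bro_init()
        {
        # Hypothetical data chunk; in practice this would come from a file.
        local data = "%PDF-1.4 ...";
        local matches = file_magic(data);

        # Each match carries a MIME type and a strength used for ranking.
        for ( i in matches )
            print matches[i]$mime, matches[i]$strength;
        }
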
@@ -44,14 +113,62 @@ Changed Functionality
     event x509_extension(c: connection, is_orig: bool, cert: X509, ext: X509_extension_info);
 
+- In addition, there are several new, more specialized events for a
+  number of x509 extensions.
+
+- Generally, all x509 events and handling functions have changed their
+  signatures.
+
+- X509 certificate verification now returns the complete certificate
+  chain that was used for verification.
+
 - Bro no longer special-cases SYN/FIN/RST-filtered traces by not
-  reporting missing data. The old behavior can be reverted by
+  reporting missing data. Instead, if Bro never sees any data segments
+  for analyzed TCP connections, the new
+  base/misc/find-filtered-trace.bro script will log a warning in
+  reporter.log and to stderr. The old behavior can be reverted by
   redef'ing "detect_filtered_trace".
 
-  TODO: Update if we add a detector for filtered traces.
-
 - We have removed the packet sorter component.
 
+- Bro no longer uses libmagic to identify file types but instead now
+  comes with its own signature library (which initially is still
+  derived from libmagic's database). This leads to a number of further
+  changes with regards to MIME types:
+
+  * The second parameter of the "identify_data" built-in function
+    can no longer be used to get verbose file type descriptions,
+    though it can still be used to get the strongest matching file
+    magic signature.
+
+  * The "file_transferred" event's "descr" parameter no longer
+    contains verbose file type descriptions.
+
+  * The BROMAGIC environment variable no longer changes any behavior
+    in Bro as magic databases are no longer used/installed.
+
+  * Removed "binary" and "octet-stream" mime type detections. They
+    don't provide any more information than an uninitialized
+    mime_type field.
+
+  * The "fa_file" record now contains a "mime_types" field that
+    contains all magic signatures that matched the file content
+    (where the "mime_type" field is just a shortcut for the
+    strongest match).
+
+- dns_TXT_reply() now supports more than one string entry by receiving
+  a vector of strings.
+
+- BroControl now runs the "exec" and "df" broctl commands only once
+  per host, instead of once per Bro node. The output of these
+  commands has been changed slightly to include both the host and
+  node names.
+
+- Several performance improvements were made. Particular emphasis
+  was put on the File Analysis system, which generally will now emit
+  far fewer file handle request events due to protocol analyzers now
+  caching that information internally.
+
 Bro 2.2
 =======
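
The dns_TXT_reply() change noted above means a handler now receives all TXT
strings at once. A minimal sketch, assuming the post-change event passes them
as a ``string_vec`` alongside the usual connection, message, and answer
parameters::

    event dns_TXT_reply(c: connection, msg: dns_msg, ans: dns_answer, strs: string_vec)
        {
        # Each element is one character-string from the TXT record.
        for ( i in strs )
            print fmt("TXT entry %d: %s", i, strs[i]);
        }
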

VERSION

@@ -1 +1 @@
-2.2-250
+2.3-343

@@ -1 +1 @@
-Subproject commit fe271628492b7b837b3fbcf4626061c8b3568589
+Subproject commit 77a86591dcf89d7252d3676d3f1199d6c927d073

@@ -1 +1 @@
-Subproject commit d7ac87294f415b5ddf3fc81bcae29815d2f835b1
+Subproject commit 43a9f360c9bf6b35fcb25d61ebff80c7feb1812b

@@ -1 +1 @@
-Subproject commit 3138e5068eeeb374c39c3d3b05b482b84d1f6e9c
+Subproject commit acb8fbe8e7bc6ace5135fb73dca8e29432cdc1ca

@@ -1 +1 @@
-Subproject commit f8273c9eb5db1168c21b298cd3af4f9b4b008717
+Subproject commit 90f9ca0ffa2306f0d1d2ac208cdbb7787199f890

@@ -1 +1 @@
-Subproject commit 4e2ec35917acb883c7d2ab19af487f3863c687ae
+Subproject commit d67d89aaee32ad5edb9068db55d1310c2f36970a

aux/plugins (new submodule) | 1

@@ -0,0 +1 @@
+Subproject commit ad600b5bdcd56a2723e323c0f2c8e1708956ca4f

cmake | 2

@@ -1 +1 @@
-Subproject commit 58c64e663ca9f035f7741775acefce1f6c8d1ed3
+Subproject commit 1316c07f7059647b6c4a496ea36e4b83bb5d8f0f

config.h.in

@@ -129,6 +129,9 @@
 /* whether words are stored with the most significant byte first */
 #cmakedefine WORDS_BIGENDIAN
 
+/* whether htonll/ntohll is defined in <arpa/inet.h> */
+#cmakedefine HAVE_BYTEORDER_64
+
 /* ultrix can't hack const */
 #cmakedefine NEED_ULTRIX_CONST_HACK
 #ifdef NEED_ULTRIX_CONST_HACK
@@ -209,3 +212,14 @@
 /* Common IPv6 extension structure */
 #cmakedefine HAVE_IP6_EXT
 
+/* String with host architecture (e.g., "linux-x86_64") */
+#define HOST_ARCHITECTURE "@HOST_ARCHITECTURE@"
+
+/* String with extension of dynamic libraries (e.g., ".so") */
+#define DYNAMIC_PLUGIN_SUFFIX "@CMAKE_SHARED_MODULE_SUFFIX@"
+
+/* True if we're building outside of the main Bro source code tree. */
+#ifndef BRO_PLUGIN_INTERNAL_BUILD
+#define BRO_PLUGIN_INTERNAL_BUILD @BRO_PLUGIN_INTERNAL_BUILD@
+#endif

configure | 41

@@ -24,6 +24,13 @@ Usage: $0 [OPTION]... [VAR=VALUE]...
   --prefix=PREFIX         installation directory [/usr/local/bro]
   --scriptdir=PATH        root installation directory for Bro scripts
                           [PREFIX/share/bro]
+  --localstatedir=PATH    when using BroControl, path to store log files
+                          and run-time data (within log/ and spool/ subdirs)
+                          [PREFIX]
+  --spooldir=PATH         when using BroControl, path to store run-time data
+                          [PREFIX/spool]
+  --logdir=PATH           when using BroControl, path to store log file
+                          [PREFIX/logs]
   --conf-files-dir=PATH   config files installation directory [PREFIX/etc]
 
 Optional Features:
@@ -39,8 +46,6 @@ Usage: $0 [OPTION]... [VAR=VALUE]...
   --disable-auxtools      don't build or install auxiliary tools
   --disable-perftools     don't try to build with Google Perftools
   --disable-python        don't try to build python bindings for broccoli
-  --disable-dataseries    don't use the optional DataSeries log writer
-  --disable-elasticsearch don't use the optional ElasticSearch log writer
 
 Required Packages in Non-Standard Locations:
   --with-openssl=PATH     path to OpenSSL install root
@@ -50,7 +55,6 @@ Usage: $0 [OPTION]... [VAR=VALUE]...
   --with-flex=PATH        path to flex executable
   --with-bison=PATH       path to bison executable
   --with-perl=PATH        path to perl executable
-  --with-libmagic=PATH    path to libmagic install root
 
 Optional Packages in Non-Standard Locations:
   --with-geoip=PATH       path to the libGeoIP install root
@@ -63,9 +67,6 @@ Usage: $0 [OPTION]... [VAR=VALUE]...
   --with-ruby-lib=PATH    path to ruby library
   --with-ruby-inc=PATH    path to ruby headers
   --with-swig=PATH        path to SWIG executable
-  --with-dataseries=PATH  path to DataSeries and Lintel libraries
-  --with-xml2=PATH        path to libxml2 installation (for DataSeries)
-  --with-curl=PATH        path to libcurl install root (for ElasticSearch)
 
 Packaging Options (for developers):
   --binary-package        toggle special logic for binary packaging
@@ -150,6 +151,15 @@ while [ $# -ne 0 ]; do
             append_cache_entry BRO_ETC_INSTALL_DIR PATH $optarg
             user_set_conffilesdir="true"
             ;;
+        --localstatedir=*)
+            append_cache_entry BRO_LOCAL_STATE_DIR PATH $optarg
+            ;;
+        --spooldir=*)
+            append_cache_entry BRO_SPOOL_DIR PATH $optarg
+            ;;
+        --logdir=*)
+            append_cache_entry BRO_LOG_DIR PATH $optarg
+            ;;
         --enable-debug)
             append_cache_entry ENABLE_DEBUG BOOL true
             ;;
@@ -184,12 +194,6 @@ while [ $# -ne 0 ]; do
         --enable-ruby)
             append_cache_entry DISABLE_RUBY_BINDINGS BOOL false
             ;;
-        --disable-dataseries)
-            append_cache_entry DISABLE_DATASERIES BOOL true
-            ;;
-        --disable-elasticsearch)
-            append_cache_entry DISABLE_ELASTICSEARCH BOOL true
-            ;;
         --with-openssl=*)
             append_cache_entry OpenSSL_ROOT_DIR PATH $optarg
             ;;
@@ -211,9 +215,6 @@ while [ $# -ne 0 ]; do
         --with-perl=*)
             append_cache_entry PERL_EXECUTABLE PATH $optarg
             ;;
-        --with-libmagic=*)
-            append_cache_entry LibMagic_ROOT_DIR PATH $optarg
-            ;;
         --with-geoip=*)
             append_cache_entry LibGeoIP_ROOT_DIR PATH $optarg
             ;;
@@ -247,16 +248,6 @@ while [ $# -ne 0 ]; do
         --with-swig=*)
             append_cache_entry SWIG_EXECUTABLE PATH $optarg
             ;;
-        --with-dataseries=*)
-            append_cache_entry DataSeries_ROOT_DIR PATH $optarg
-            append_cache_entry Lintel_ROOT_DIR PATH $optarg
-            ;;
-        --with-xml2=*)
-            append_cache_entry LibXML2_ROOT_DIR PATH $optarg
-            ;;
-        --with-curl=*)
-            append_cache_entry LibCURL_ROOT_DIR PATH $optarg
-            ;;
         --binary-package)
             append_cache_entry BINARY_PACKAGING_MODE BOOL true
             ;;

doc/CMakeLists.txt

@@ -14,8 +14,6 @@ if (NOT ${retval} EQUAL 0)
     message(FATAL_ERROR "Problem setting BROPATH")
 endif ()
 
-set(BROMAGIC ${BRO_MAGIC_SOURCE_PATH})
-
 # Configure the Sphinx config file (expand variables CMake might know about).
 configure_file(${CMAKE_CURRENT_SOURCE_DIR}/conf.py.in
                ${CMAKE_CURRENT_BINARY_DIR}/conf.py
@@ -34,7 +32,6 @@ add_custom_target(sphinxdoc
         ${CMAKE_CURRENT_SOURCE_DIR}/ ${SPHINX_INPUT_DIR}
     # Use Bro/Broxygen to dynamically generate reST for all Bro scripts.
     COMMAND BROPATH=${BROPATH}
-            BROMAGIC=${BROMAGIC}
             ${CMAKE_BINARY_DIR}/src/bro
             -X ${CMAKE_CURRENT_BINARY_DIR}/broxygen.conf
             broxygen >/dev/null

doc/README

@@ -10,7 +10,7 @@ common/general documentation, style sheets, JavaScript, etc. The Sphinx
 config file is produced from ``conf.py.in``, and can be edited to change
 various Sphinx options.
 
-There is also a custom Sphinx domain implemented in ``source/ext/bro.py``
+There is also a custom Sphinx domain implemented in ``ext/bro.py``
 which adds some reST directives and roles that aid in generating useful
 index entries and cross-references. Other extensions can be added in
 a similar fashion.
@@ -19,7 +19,8 @@ The ``make doc`` target in the top-level Makefile can be used to locally
 render the reST files into HTML. That target depends on:
 
 * Python interpreter >= 2.5
-* `Sphinx <http://sphinx.pocoo.org/>`_ >= 1.0.1
+* `Sphinx <http://sphinx-doc.org/>`_ >= 1.0.1
+* Doxygen (required only for building the Broccoli API doc)
 
 After completion, HTML documentation is symlinked in ``build/html``.


@@ -10,7 +10,7 @@
 {% endblock %}
 
 {% block header %}
-<iframe src="http://www.bro.org/frames/header-no-logo.html" width="100%" height="100px" frameborder="0" marginheight="0" scrolling="no" marginwidth="0">
+<iframe src="//www.bro.org/frames/header-no-logo.html" width="100%" height="100px" frameborder="0" marginheight="0" scrolling="no" marginwidth="0">
 </iframe>
 {% endblock %}
@@ -108,6 +108,6 @@
 {% endblock %}
 
 {% block footer %}
-<iframe src="http://www.bro.org/frames/footer.html" width="100%" height="420px" frameborder="0" marginheight="0" scrolling="no" marginwidth="0">
+<iframe src="//www.bro.org/frames/footer.html" width="100%" height="420px" frameborder="0" marginheight="0" scrolling="no" marginwidth="0">
 </iframe>
 {% endblock %}


@@ -1,18 +1,19 @@
 ========================
-Setting up a Bro Cluster
+Bro Cluster Architecture
 ========================
 
-Intro
-------
-
 Bro is not multithreaded, so once the limitations of a single processor core
 are reached the only option currently is to spread the workload across many
 cores, or even many physical computers. The cluster deployment scenario for
-Bro is the current solution to build these larger systems. The accompanying
-tools and scripts provide the structure to easily manage many Bro processes
-examining packets and doing correlation activities but acting as a singular,
-cohesive entity.
+Bro is the current solution to build these larger systems. The tools and
+scripts that accompany Bro provide the structure to easily manage many Bro
+processes examining packets and doing correlation activities but acting as
+a singular, cohesive entity. This document describes the Bro cluster
+architecture. For information on how to configure a Bro cluster,
+see the documentation for
+:doc:`BroControl <../components/broctl/README>`.
 
 Architecture
 ---------------
@@ -41,11 +42,11 @@ messages and notices from the rest of the nodes in the cluster using the Bro
 communications protocol. The result is a single log instead of many
 discrete logs that you have to combine in some manner with post-processing.
 The manager also takes the opportunity to de-duplicate notices, and it has the
-ability to do so since its acting as the choke point for notices and how notices
-might be processed into actions (e.g., emailing, paging, or blocking).
+ability to do so since it's acting as the choke point for notices and how
+notices might be processed into actions (e.g., emailing, paging, or blocking).
 
 The manager process is started first by BroControl and it only opens its
-designated port and waits for connections, it doesnt initiate any
+designated port and waits for connections, it doesn't initiate any
 connections to the rest of the cluster. Once the workers are started and
 connect to the manager, logs and notices will start arriving to the manager
 process from the workers.
@@ -58,12 +59,11 @@ the workers by alleviating the need for all of the workers to connect
 directly to each other.
 
 Examples of synchronized state from the scripts that ship with Bro include
-the full list of “known” hosts and services (which are hosts or services
+the full list of "known" hosts and services (which are hosts or services
 identified as performing full TCP handshakes) or an analyzed protocol has been
 found on the connection. If worker A detects host 1.2.3.4 as an active host,
 it would be beneficial for worker B to know that as well. So worker A shares
-that information as an insertion to a set
-<link to set documentation would be good here> which travels to the clusters
+that information as an insertion to a set which travels to the cluster's
 proxy and the proxy sends that same set insertion to worker B. The result
 is that worker A and worker B have shared knowledge about host and services
 that are active on the network being monitored.
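
For illustration, state sharing of the kind described above is typically
expressed in scriptland with the ``&synchronized`` attribute; a minimal
sketch, with a hypothetical set name and event choice::

    # Inserts made on one worker propagate via the proxy to the other workers.
    global active_hosts: set[addr] &synchronized;

    event connection_established(c: connection)
        {
        add active_hosts[c$id$orig_h];
        }
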
@@ -79,7 +79,7 @@ necessary for the number of workers they are serving. It is best to start
 with a single proxy and add more if communication performance problems are
 found.
 
-Bro processes acting as proxies dont tend to be extremely hard on CPU
+Bro processes acting as proxies don't tend to be extremely hard on CPU
 or memory and users frequently run proxy processes on the same physical
 host as the manager.
@@ -106,7 +106,7 @@ dedicated to being workers with each one containing dual 6-core processors.
 Once a flow-based load balancer is put into place this model is extremely
 easy to scale. It is recommended that you estimate the amount of
-hardware you will need to fully analyze your traffic. If more is needed its
+hardware you will need to fully analyze your traffic. If more is needed it's
 relatively easy to increase the size of the cluster in most cases.
 
 Frontend Options
@@ -147,14 +147,13 @@ On host flow balancing
 PF_RING
 ^^^^^^^
 
-The PF_RING software for Linux has a “clustering” feature which will do
+The PF_RING software for Linux has a "clustering" feature which will do
 flow-based load balancing across a number of processes that are sniffing the
 same interface. This allows you to easily take advantage of multiple
-cores in a single physical host because Bros main event loop is single
-threaded and cant natively utilize all of the cores. More information about
-Bro with PF_RING can be found here: (someone want to write a quick Bro/PF_RING
-tutorial to link to here? document installing kernel module, libpcap
-wrapper, building Bro with the --with-pcap configure option)
+cores in a single physical host because Bro's main event loop is single
+threaded and can't natively utilize all of the cores. If you want to use
+PF_RING, see the documentation on `how to configure Bro with PF_RING
+<http://bro.org/documentation/load-balancing.html>`_.
 
 Netmap
 ^^^^^^
@@ -167,7 +166,7 @@ Click! Software Router
 ^^^^^^^^^^^^^^^^^^^^^^
 
 Click! can be used for flow based load balancing with a simple configuration.
-(link to an example for the config). This solution is not recommended on
-Linux due to Bros PF_RING support and only as a last resort on other
+This solution is not recommended on
+Linux due to Bro's PF_RING support and only as a last resort on other
 operating systems since it causes a lot of overhead due to context switching
 back and forth between kernel and userland several times per packet.

doc/conf.py.in

@@ -21,7 +21,7 @@ sys.path.insert(0, os.path.abspath('sphinx_input/ext'))
 # ----- Begin of BTest configuration. -----
 btest = os.path.abspath("@CMAKE_SOURCE_DIR@/aux/btest")
-brocut = os.path.abspath("@CMAKE_SOURCE_DIR@/aux/bro-aux/bro-cut")
+brocut = os.path.abspath("@CMAKE_SOURCE_DIR@/build/aux/bro-aux/bro-cut")
 bro = os.path.abspath("@CMAKE_SOURCE_DIR@/build/src")
 os.environ["PATH"] += (":%s:%s/sphinx:%s:%s" % (btest, btest, bro, brocut))
@@ -38,7 +38,6 @@ extensions += ["broxygen"]
 bro_binary = os.path.abspath("@CMAKE_SOURCE_DIR@/build/src/bro")
 broxygen_cache="@BROXYGEN_CACHE_DIR@"
 os.environ["BROPATH"] = "@BROPATH@"
-os.environ["BROMAGIC"] = "@BROMAGIC@"
 # ----- End of Broxygen configuration. -----
 
 # -- General configuration -----------------------------------------------------

doc/configuration/index.rst (new file, 263 lines)

@@ -0,0 +1,263 @@
.. _configuration:
=====================
Cluster Configuration
=====================
.. contents::
A *Bro Cluster* is a set of systems jointly analyzing the traffic of
a network link in a coordinated fashion. You can operate such a setup from
a central manager system easily using BroControl because BroControl
hides much of the complexity of the multi-machine installation.
This section gives examples of how to set up common cluster configurations
using BroControl. For a full reference on BroControl, see the
:doc:`BroControl <../components/broctl/README>` documentation.
Preparing to Setup a Cluster
============================
In this document we refer to the user account used to set up the cluster
as the "Bro user". When setting up a cluster the Bro user must be set up
on all hosts, and this user must have ssh access from the manager to all
machines in the cluster, and it must work without being prompted for a
password/passphrase (for example, using ssh public key authentication).
Also, on the worker nodes this user must have access to the target
network interface in promiscuous mode.
Additional storage must be available on all hosts under the same path,
which we will call the cluster's prefix path. We refer to this directory
as ``<prefix>``. If you build Bro from source, then ``<prefix>`` is
the directory specified with the ``--prefix`` configure option,
or ``/usr/local/bro`` by default. The Bro user must be able to either
create this directory or, where it already exists, must have write
permission inside this directory on all hosts.
When trying to decide how to configure the Bro nodes, keep in mind that
there can be multiple Bro instances running on the same host. For example,
it's possible to run a proxy and the manager on the same host. However, it is
recommended to run workers on a different machine than the manager because
workers can consume a lot of CPU resources. The maximum recommended
number of workers to run on a machine should be one or two less than
the number of CPU cores available on that machine. Using a load-balancing
method (such as PF_RING) along with CPU pinning can decrease the load on
the worker machines.
Basic Cluster Configuration
===========================
With all prerequisites in place, perform the following steps to set up
a Bro cluster (do this as the Bro user on the manager host only):
- Edit the BroControl configuration file, ``<prefix>/etc/broctl.cfg``,
and change the value of any BroControl options to be more suitable for
your environment. You will most likely want to change the value of
the ``MailTo`` and ``LogRotationInterval`` options. A complete
reference of all BroControl options can be found in the
:doc:`BroControl <../components/broctl/README>` documentation.
- Edit the BroControl node configuration file, ``<prefix>/etc/node.cfg``
to define where manager, proxies, and workers are to run. For a cluster
configuration, you must comment-out (or remove) the standalone node
in that file, and either uncomment or add node entries for each node
in your cluster (manager, proxy, and workers). For example, if you wanted
to run four Bro nodes (two workers, one proxy, and a manager) on a cluster
consisting of three machines, your cluster configuration would look like
this::
[manager]
type=manager
host=10.0.0.10
[proxy-1]
type=proxy
host=10.0.0.10
[worker-1]
type=worker
host=10.0.0.11
interface=eth0
[worker-2]
type=worker
host=10.0.0.12
interface=eth0
For a complete reference of all options that are allowed in the ``node.cfg``
file, see the :doc:`BroControl <../components/broctl/README>` documentation.
- Edit the network configuration file ``<prefix>/etc/networks.cfg``. This
file lists all of the networks which the cluster should consider as local
to the monitored environment.
- Install workers and proxies using BroControl::
> broctl install
- Some tasks need to be run on a regular basis. On the manager node,
insert a line like this into the crontab of the user running the
cluster::
0-59/5 * * * * <prefix>/bin/broctl cron
(Note: if you are editing the system crontab instead of a user's own
crontab, then you need to also specify the user which the command
will be run as. The username must be placed after the time fields
and before the broctl command.)
Note that on some systems (FreeBSD in particular), the default PATH
for cron jobs does not include the directories where bash and python
are installed (the symptoms of this problem would be that "broctl cron"
works when run directly by the user, but does not work from a cron job).
To solve this problem, you would either need to create symlinks
to bash and python in a directory that is in the default PATH for
cron jobs, or specify a new PATH in the crontab.
PF_RING Cluster Configuration
=============================
`PF_RING <http://www.ntop.org/products/pf_ring/>`_ allows speeding up the
packet capture process by installing a new type of socket in Linux systems.
It supports 10Gbit hardware packet filtering using standard network adapters,
and user-space DNA (Direct NIC Access) for fast packet capture/transmission.
Installing PF_RING
^^^^^^^^^^^^^^^^^^
1. Download and install PF_RING for your system following the instructions
`here <http://www.ntop.org/get-started/download/#PF_RING>`_. The following
commands will install the PF_RING libraries and kernel module (replace
the version number 5.6.2 in this example with the version that you
downloaded)::
cd /usr/src
tar xvzf PF_RING-5.6.2.tar.gz
cd PF_RING-5.6.2/userland/lib
./configure --prefix=/opt/pfring
make install
cd ../libpcap
./configure --prefix=/opt/pfring
make install
cd ../tcpdump-4.1.1
./configure --prefix=/opt/pfring
make install
cd ../../kernel
make install
modprobe pf_ring enable_tx_capture=0 min_num_slots=32768
Refer to the documentation for your Linux distribution on how to load the
pf_ring module at boot time. You will need to install the PF_RING
library files and kernel module on all of the workers in your cluster.
2. Download the Bro source code.
3. Configure and install Bro using the following commands::
./configure --with-pcap=/opt/pfring
make
make install
4. Make sure Bro is correctly linked to the PF_RING libpcap libraries::
ldd /usr/local/bro/bin/bro | grep pcap
libpcap.so.1 => /opt/pfring/lib/libpcap.so.1 (0x00007fa6d7d24000)
5. Configure BroControl to use PF_RING (explained below).
6. Run "broctl install" on the manager. This command will install Bro and
all required scripts to the other machines in your cluster.
Using PF_RING
^^^^^^^^^^^^^
In order to use PF_RING, you need to specify the correct configuration
options for your worker nodes in BroControl's node configuration file.
Edit the ``node.cfg`` file and specify ``lb_method=pf_ring`` for each of
your worker nodes. Next, use the ``lb_procs`` node option to specify how
many Bro processes you'd like that worker node to run, and optionally pin
those processes to certain CPU cores with the ``pin_cpus`` option (CPU
numbering starts at zero). The correct ``pin_cpus`` setting to use is
dependent on your CPU architecture (Intel and AMD systems enumerate
processors in different ways). Using the wrong ``pin_cpus`` setting
can cause poor performance. Here is what a worker node entry should
look like when using PF_RING and CPU pinning::
[worker-1]
type=worker
host=10.0.0.50
interface=eth0
lb_method=pf_ring
lb_procs=10
pin_cpus=2,3,4,5,6,7,8,9,10,11
Using PF_RING+DNA with symmetric RSS
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
You must have a PF_RING+DNA license in order to do this. You can sniff
each packet only once.
1. Load the DNA NIC driver (i.e. ixgbe) on each worker host.
2. Run "ethtool -L dna0 combined 10" (this will establish 10 RSS queues
on your NIC) on each worker host. You must make sure that you set the
number of RSS queues to the same as the number you specify for the
lb_procs option in the node.cfg file.
3. On the manager, configure your worker(s) in node.cfg::
[worker-1]
type=worker
host=10.0.0.50
interface=dna0
lb_method=pf_ring
lb_procs=10
Using PF_RING+DNA with pfdnacluster_master
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
You must have a PF_RING+DNA license and a libzero license in order to do
this. You can load balance between multiple applications and sniff the
same packets multiple times with different tools.
1. Load the DNA NIC driver (i.e. ixgbe) on each worker host.
2. Run "ethtool -L dna0 1" (this will establish 1 RSS queues on your NIC)
on each worker host.
3. Run the pfdnacluster_master command on each worker host. For example::
pfdnacluster_master -c 21 -i dna0 -n 10
Make sure that your cluster ID (21 in this example) matches the interface
name you specify in the node.cfg file. Also make sure that the number
of processes you're balancing across (10 in this example) matches
the lb_procs option in the node.cfg file.
4. If you are load balancing to other processes, you can use the
pfringfirstappinstance variable in broctl.cfg to set the first
application instance that Bro should use. For example, if you are running
pfdnacluster_master with "-n 10,4" you would set
pfringfirstappinstance=4. Unfortunately that's still a global setting
in broctl.cfg at the moment but we may change that to something you can
set in node.cfg eventually.
5. On the manager, configure your worker(s) in node.cfg::
[worker-1]
type=worker
host=10.0.0.50
interface=dnacluster:21
lb_method=pf_ring
lb_procs=10

doc/devel/plugins.rst (new file, 447 lines)

@@ -0,0 +1,447 @@
===================
Writing Bro Plugins
===================
Bro is internally moving to a plugin structure that enables extending
the system dynamically, without modifying the core code base. That way
custom code remains self-contained and can be maintained, compiled,
and installed independently. Currently, plugins can add the following
functionality to Bro:
- Bro scripts.
- Builtin functions/events/types for the scripting language.
- Protocol analyzers.
- File analyzers.
- Packet sources and packet dumpers.
- Logging framework backends.
- Input framework readers.
A plugin's functionality is available to the user just as if Bro had
the corresponding code built-in. Indeed, internally many of Bro's
pieces are structured as plugins as well; they are just statically
compiled into the binary rather than loaded dynamically at runtime.
Quick Start
===========
Writing a basic plugin is quite straightforward as long as one
follows a few conventions. In the following we walk through a simple
example plugin that adds a new built-in function (bif) to Bro: we'll add
``rot13(s: string) : string``, a function that rotates every character
in a string by 13 places.
Generally, a plugin comes in the form of a directory following a
certain structure. To get started, Bro's distribution provides a
helper script ``aux/bro-aux/plugin-support/init-plugin`` that creates
a skeleton plugin that can then be customized. Let's use that::
# mkdir rot13-plugin
# cd rot13-plugin
# init-plugin Demo Rot13
As you can see the script takes two arguments. The first is a
namespace the plugin will live in, and the second a descriptive name
for the plugin itself. Bro uses the combination of the two to identify
a plugin. The namespace serves to avoid naming conflicts between
plugins written by independent developers; pick, e.g., the name of
your organisation. The namespace ``Bro`` is reserved for functionality
distributed by the Bro Project. In our example, the plugin will be
called ``Demo::Rot13``.
The ``init-plugin`` script puts a number of files in place. The full
layout is described later. For now, all we need is
``src/rot13.bif``. It's initially empty, but we'll add our new bif
there as follows::
# cat src/rot13.bif
module CaesarCipher;
function rot13%(s: string%) : string
%{
char* rot13 = copy_string(s->CheckString());
for ( char* p = rot13; *p; p++ )
{
char b = islower(*p) ? 'a' : 'A';
*p = (*p - b + 13) % 26 + b;
}
BroString* bs = new BroString(1, reinterpret_cast<byte_vec>(rot13),
strlen(rot13));
return new StringVal(bs);
%}
The syntax of this file is just like any other ``*.bif`` file; we
won't go into it here.
Now we can already compile our plugin; we just need to tell the
configure script put in place by ``init-plugin`` where the Bro source
tree is located (Bro needs to have been built there first)::
# ./configure --bro-dist=/path/to/bro/dist && make
[... cmake output ...]
Now our ``rot13-plugin`` directory has everything that it needs
for Bro to recognize it as a dynamic plugin. Once we point Bro to it,
it will pull it in automatically, as we can check with the ``-N``
option::
# export BRO_PLUGIN_PATH=/path/to/rot13-plugin
# bro -N
[...]
Plugin: Demo::Rot13 - <Insert brief description of plugin> (dynamic, version 1)
[...]
That looks quite good, except for the dummy description that we should
replace with something nicer so that users will know what our plugin
is about. We do this by editing the ``config.description`` line in
``src/Plugin.cc``, like this::
[...]
plugin::Configuration Configure()
{
plugin::Configuration config;
config.name = "Demo::Rot13";
config.description = "Caesar cipher rotating a string's characters by 13 places.";
config.version.major = 1;
config.version.minor = 0;
return config;
}
[...]
# make
[...]
# bro -N | grep Rot13
Plugin: Demo::Rot13 - Caesar cipher rotating a string's characters by 13 places. (dynamic, version 1)
Better. Bro can also show us what exactly the plugin provides with the
more verbose option ``-NN``::
# bro -NN
[...]
Plugin: Demo::Rot13 - Caesar cipher rotating a string's characters by 13 places. (dynamic, version 1)
[Function] CaesarCipher::rot13
[...]
There's our function. Now let's use it::
# bro -e 'print CaesarCipher::rot13("Hello")'
Uryyb
It works. We next install the plugin along with Bro itself, so that it
will find it directly without needing the ``BRO_PLUGIN_PATH``
environment variable. If we first unset the variable, the function
will no longer be available::
# unset BRO_PLUGIN_PATH
# bro -e 'print CaesarCipher::rot13("Hello")'
error in <command line>, line 1: unknown identifier CaesarCipher::rot13, at or near "CaesarCipher::rot13"
Once we install it, it works again::
# make install
# bro -e 'print CaesarCipher::rot13("Hello")'
Uryyb
The installed version went into
``<bro-install-prefix>/lib/bro/plugins/Demo_Rot13``.
We can distribute the plugin in either source or binary form by using
the Makefile's ``sdist`` and ``bdist`` targets, respectively. Both
create corresponding tarballs::
# make sdist
[...]
Source distribution in build/sdist/Demo_Rot13.tar.gz
# make bdist
[...]
Binary distribution in build/Demo_Rot13-darwin-x86_64.tar.gz
The source archive will contain everything in the plugin directory
except any generated files. The binary archive will contain anything
needed to install and run the plugin, i.e., just what ``make install``
puts into place as well. As the binary distribution is
platform-dependent, its name includes the OS and architecture the
plugin was built on.
Plugin Directory Layout
=======================
A plugin's directory needs to follow a set of conventions so that Bro
(1) recognizes it as a plugin, and (2) knows what to load. While
``init-plugin`` takes care of most of this, the following is the full
story. We'll use ``<base>`` to represent a plugin's top-level
directory.
``<base>/__bro_plugin__``
A file that marks a directory as containing a Bro plugin. The file
must exist, and its content must consist of a single line with the
qualified name of the plugin (e.g., "Demo::Rot13").
``<base>/lib/<plugin-name>-<os>-<arch>.so``
The shared library containing the plugin's compiled code. Bro will
load this in dynamically at run-time if OS and architecture match
the current platform.
``scripts/``
A directory with the plugin's custom Bro scripts. When the plugin
gets activated, this directory will be automatically added to
``BROPATH``, so that any scripts/modules inside can be
"@load"ed.
``scripts/__load__.bro``
A Bro script that will be loaded immediately when the plugin gets
activated. See below for more information on activating plugins.
``lib/bif/``
Directory with auto-generated Bro scripts that declare the plugin's
bif elements. The files here are produced by ``bifcl``.
By convention, a plugin should put its custom scripts into sub folders
of ``scripts/``, i.e., ``scripts/<script-namespace>/<script>.bro`` to
avoid conflicts. As usual, you can then put a ``__load__.bro`` in
there as well so that, e.g., ``@load Demo/Rot13`` could load a whole
module in the form of multiple individual scripts.
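
For example, a layout along these lines would work (the script names are
hypothetical)::

    # scripts/__load__.bro -- loaded automatically when the plugin activates
    @load ./Demo/Rot13

    # scripts/Demo/Rot13/__load__.bro -- pulls in the module's individual scripts
    @load ./main
    @load ./observations
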
Note that in addition to the paths above, the ``init-plugin`` helper
puts some more files and directories in place that help with
development and installation (e.g., ``CMakeLists.txt``, ``Makefile``,
and source code in ``src/``). However, all these do not have a special
meaning for Bro at runtime and aren't necessary for a plugin to
function.
``init-plugin``
===============
``init-plugin`` puts a basic plugin structure in place that follows
the above layout and augments it with a CMake build and installation
system. Plugins with this structure can be used both directly out of
their source directory (after ``make`` and setting Bro's
``BRO_PLUGIN_PATH``), and when installed alongside Bro (after ``make
install``).
``make install`` copies over the ``lib`` and ``scripts`` directories,
as well as the ``__bro_plugin__`` magic file and the ``README`` (which
you should customize). One can add further CMake ``install`` rules to
install additional files if needed.
``init-plugin`` will never overwrite existing files, so it's safe to
rerun in an existing plugin directory; it only puts files in place that
don't exist yet. That also provides a convenient way to revert a file
back to what ``init-plugin`` created originally: just delete it and
rerun.
Activating a Plugin
===================
A plugin needs to be *activated* to make it available to the user.
Activating a plugin will:
1. Load the dynamic module
2. Make any bif items available
3. Add the ``scripts/`` directory to ``BROPATH``
4. Load ``scripts/__load__.bro``
By default, Bro will automatically activate all dynamic plugins found
in its search path ``BRO_PLUGIN_PATH``. However, in bare mode (``bro
-b``), no dynamic plugins will be activated by default; instead the
user can selectively enable individual plugins in scriptland using the
``@load-plugin <qualified-plugin-name>`` directive (e.g.,
``@load-plugin Demo::Rot13``). Alternatively, one can activate a
plugin from the command-line by specifying its full name
(``Demo::Rot13``), or set the environment variable
``BRO_PLUGIN_ACTIVATE`` to a list of comma(!)-separated names of
plugins to unconditionally activate, even in bare mode.
``bro -N`` shows activated plugins separately from found but not yet
activated plugins. Note that plugins compiled statically into Bro are
always activated, and hence show up as such even in bare mode.
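
As a small sketch of the bare-mode workflow, a script could activate the
Quick Start example plugin explicitly (assuming it is on
``BRO_PLUGIN_PATH``)::

    # Activate just this one plugin even though we run with "bro -b":
    @load-plugin Demo::Rot13

    event bro_init()
        {
        print CaesarCipher::rot13("Hello");
        }
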
Plugin Components
=================
The following subsections detail providing individual types of
functionality via plugins. Note that a single plugin can provide more
than one component type. For example, a plugin could provide multiple
protocol analyzers at once; or both a logging backend and input reader
at the same time.
.. todo::
These subsections are mostly missing right now, as much of their
content isn't actually plugin-specific, but concerns generally
writing such functionality for Bro. The best way to get started
right now is to look at existing code implementing similar
functionality, either as a plugin or inside Bro proper. Also, for
each component type there's a unit test in
``testing/btest/plugins`` creating a basic plugin skeleton with a
corresponding component.
Bro Scripts
-----------
Scripts are easy: just put them into ``scripts/``, as described above.
The CMake infrastructure will automatically install them, as well
include them into the source and binary plugin distributions.
Builtin Language Elements
-------------------------
Functions
TODO
Events
TODO
Types
TODO
Protocol Analyzers
------------------
TODO.
File Analyzers
--------------
TODO.
Logging Writer
--------------
TODO.
Input Reader
------------
TODO.
Packet Sources
--------------
TODO.
Packet Dumpers
--------------
TODO.
Hooks
=====
TODO.
Testing Plugins
===============
A plugin should come with a test suite to exercise its functionality.
The ``init-plugin`` script puts in place a basic BTest setup
to start with. Initially, it comes with a single test that just checks
that Bro loads the plugin correctly. It won't have a baseline yet, so
let's get that in place::
# cd tests
# btest -d
[ 0%] plugin.loading ... failed
% 'btest-diff output' failed unexpectedly (exit code 100)
% cat .diag
== File ===============================
Demo::Rot13 - Caesar cipher rotating a string's characters by 13 places. (dynamic, version 1.0)
[Function] CaesarCipher::rot13
== Error ===============================
test-diff: no baseline found.
=======================================
# btest -U
all 1 tests successful
# cd ..
# make test
make -C tests
make[1]: Entering directory `tests'
all 1 tests successful
make[1]: Leaving directory `tests'
Now let's add a custom test that ensures that our bif works
correctly::
# cd tests
# cat >plugin/rot13.bro
# @TEST-EXEC: bro %INPUT >output
# @TEST-EXEC: btest-diff output
event bro_init()
{
print CaesarCipher::rot13("Hello");
}
Check the output::
# btest -d plugin/rot13.bro
[ 0%] plugin.rot13 ... failed
% 'btest-diff output' failed unexpectedly (exit code 100)
% cat .diag
== File ===============================
Uryyb
== Error ===============================
test-diff: no baseline found.
=======================================
% cat .stderr
1 of 1 test failed
Install the baseline::
# btest -U plugin/rot13.bro
all 1 tests successful
Run the test-suite::
# btest
all 2 tests successful
Debugging Plugins
=================
If your plugin isn't loading as expected, Bro's debugging facilities
can help to illuminate what's going on. To enable, recompile Bro
with debugging support (``./configure --enable-debug``), and
afterwards rebuild your plugin as well. If you then run Bro with ``-B
plugins``, it will produce a file ``debug.log`` that records details
about the process for searching, loading, and activating plugins.
To generate your own debugging output from inside your plugin, you can
add a custom debug stream by using the ``PLUGIN_DBG_LOG(<plugin>,
<args>)`` macro (defined in ``DebugLogger.h``), where ``<plugin>`` is
the ``Plugin`` instance and ``<args>`` are printf-style arguments,
just as with Bro's standard debugging macros (grep for ``DBG_LOG`` in
Bro's ``src/`` to see examples). At runtime, you can then activate
your plugin's debugging output with ``-B plugin-<name>``, where
``<name>`` is the name of the plugin as returned by its
``Configure()`` method, but with the namespace separator ``::``
replaced by a simple dash. For example, if the plugin is called
``Bro::Demo``, use ``-B plugin-Bro-Demo``. As usual, the debugging
output will be recorded to ``debug.log`` if Bro is compiled in debug
mode.
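For instance, a (hypothetical) helper class in the ``Demo::Rot13`` plugin
could emit debug output like this; the ``plugin::Demo_Rot13::plugin``
singleton is assumed to be the instance declared by the ``init-plugin``
skeleton's ``Plugin.h``, so adjust the namespace and class name to your
plugin::

    #include "Plugin.h"   // declares plugin::Demo_Rot13::plugin

    void SomeHelper::Process(int len)
        {
        // Shows up in debug.log when running with -B plugin-Demo-Rot13.
        PLUGIN_DBG_LOG(plugin::Demo_Rot13::plugin, "processing %d bytes", len);
        }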
Documenting Plugins
===================
.. todo::
Integrate all this with Broxygen.


@ -176,6 +176,10 @@ class BroIdentifier(BroGeneric):
    def get_index_text(self, objectname, name):
        return name

class BroKeyword(BroGeneric):
    def get_index_text(self, objectname, name):
        return name

class BroAttribute(BroGeneric):
    def get_index_text(self, objectname, name):
        return _('%s (attribute)') % (name)
@ -213,6 +217,7 @@ class BroDomain(Domain):
        'type': ObjType(l_('type'), 'type'),
        'namespace': ObjType(l_('namespace'), 'namespace'),
        'id': ObjType(l_('id'), 'id'),
        'keyword': ObjType(l_('keyword'), 'keyword'),
        'enum': ObjType(l_('enum'), 'enum'),
        'attr': ObjType(l_('attr'), 'attr'),
    }
@ -221,6 +226,7 @@ class BroDomain(Domain):
        'type': BroGeneric,
        'namespace': BroNamespace,
        'id': BroIdentifier,
        'keyword': BroKeyword,
        'enum': BroEnum,
        'attr': BroAttribute,
    }
@ -229,6 +235,7 @@ class BroDomain(Domain):
        'type': XRefRole(),
        'namespace': XRefRole(),
        'id': XRefRole(),
        'keyword': XRefRole(),
        'enum': XRefRole(),
        'attr': XRefRole(),
        'see': XRefRole(),


@ -1,186 +0,0 @@
=============================
Binary Output with DataSeries
=============================
.. rst-class:: opening
Bro's default ASCII log format is not exactly the most efficient
way for storing and searching large volumes of data. As an
alternative, Bro comes with experimental support for `DataSeries
<http://www.hpl.hp.com/techreports/2009/HPL-2009-323.html>`_
output, an efficient binary format for recording structured bulk
data. DataSeries is developed and maintained at HP Labs.
.. contents::
Installing DataSeries
---------------------
To use DataSeries, its libraries must be available at compile-time,
along with the supporting *Lintel* package. Generally, both are
distributed on `HP Labs' web site
<http://tesla.hpl.hp.com/opensource/>`_. Currently, however, you need
to use recent development versions for both packages, which you can
download from github like this::
git clone http://github.com/dataseries/Lintel
git clone http://github.com/dataseries/DataSeries
To build and install the two into ``<prefix>``, do::
( cd Lintel && mkdir build && cd build && cmake -DCMAKE_INSTALL_PREFIX=<prefix> .. && make && make install )
( cd DataSeries && mkdir build && cd build && cmake -DCMAKE_INSTALL_PREFIX=<prefix> .. && make && make install )
Please refer to the packages' documentation for more information about
the installation process. In particular, there's more information on
required and optional `dependencies for Lintel
<https://raw.github.com/dataseries/Lintel/master/doc/dependencies.txt>`_
and `dependencies for DataSeries
<https://raw.github.com/dataseries/DataSeries/master/doc/dependencies.txt>`_.
For users on RedHat-style systems, you'll need the following::
yum install libxml2-devel boost-devel
Compiling Bro with DataSeries Support
-------------------------------------
Once you have installed DataSeries, Bro's ``configure`` should pick it
up automatically as long as it finds it in a standard system location.
Alternatively, you can specify the DataSeries installation prefix
manually with ``--with-dataseries=<prefix>``. Keep an eye on
``configure``'s summary output; if it looks like the following, Bro
found DataSeries and will compile in the support::
# ./configure --with-dataseries=/usr/local
[...]
====================| Bro Build Summary |=====================
[...]
DataSeries: true
[...]
================================================================
Activating DataSeries
---------------------
The direct way to use DataSeries is to switch *all* log files over to
the binary format. To do that, just add ``redef
Log::default_writer=Log::WRITER_DATASERIES;`` to your ``local.bro``.
For testing, you can also just pass that on the command line::
bro -r trace.pcap Log::default_writer=Log::WRITER_DATASERIES
With that, Bro will now write all its output into DataSeries files
``*.ds``. You can inspect these using DataSeries's set of command line
tools, which its installation process installs into ``<prefix>/bin``.
For example, to convert a file back into an ASCII representation::
$ ds2txt conn.log
[... We skip a bunch of metadata here ...]
ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes
1300475167.096535 CRCC5OdDlXe 141.142.220.202 5353 224.0.0.251 5353 udp dns 0.000000 0 0 S0 F 0 D 1 73 0 0
1300475167.097012 o7XBsfvo3U1 fe80::217:f2ff:fed7:cf65 5353 ff02::fb 5353 udp 0.000000 0 0 S0 F 0 D 1 199 0 0
1300475167.099816 pXPi1kPMgxb 141.142.220.50 5353 224.0.0.251 5353 udp 0.000000 0 0 S0 F 0 D 1 179 0 0
1300475168.853899 R7sOc16woCj 141.142.220.118 43927 141.142.2.2 53 udp dns 0.000435 38 89 SF F 0 Dd 1 66 1 117
1300475168.854378 Z6dfHVmt0X7 141.142.220.118 37676 141.142.2.2 53 udp dns 0.000420 52 99 SF F 0 Dd 1 80 1 127
1300475168.854837 k6T92WxgNAh 141.142.220.118 40526 141.142.2.2 53 udp dns 0.000392 38 183 SF F 0 Dd 1 66 1 211
[...]
(``--skip-all`` suppresses the metadata.)
Note that the ASCII conversion is *not* equivalent to Bro's default
output format.
You can also switch only individual files over to DataSeries by adding
code like this to your ``local.bro``:
.. code:: bro
event bro_init()
{
local f = Log::get_filter(Conn::LOG, "default"); # Get default filter for connection log.
f$writer = Log::WRITER_DATASERIES; # Change writer type.
Log::add_filter(Conn::LOG, f); # Replace filter with adapted version.
}
Bro's DataSeries writer comes with a few tuning options, see
:doc:`/scripts/base/frameworks/logging/writers/dataseries.bro`.
Working with DataSeries
=======================
Here are a few examples of using DataSeries command line tools to work
with the output files.
* Printing CSV::
$ ds2txt --csv conn.log
ts,uid,id.orig_h,id.orig_p,id.resp_h,id.resp_p,proto,service,duration,orig_bytes,resp_bytes,conn_state,local_orig,missed_bytes,history,orig_pkts,orig_ip_bytes,resp_pkts,resp_ip_bytes
1258790493.773208,ZTtgbHvf4s3,192.168.1.104,137,192.168.1.255,137,udp,dns,3.748891,350,0,S0,F,0,D,7,546,0,0
1258790451.402091,pOY6Rw7lhUd,192.168.1.106,138,192.168.1.255,138,udp,,0.000000,0,0,S0,F,0,D,1,229,0,0
1258790493.787448,pn5IiEslca9,192.168.1.104,138,192.168.1.255,138,udp,,2.243339,348,0,S0,F,0,D,2,404,0,0
1258790615.268111,D9slyIu3hFj,192.168.1.106,137,192.168.1.255,137,udp,dns,3.764626,350,0,S0,F,0,D,7,546,0,0
[...]
Add ``--separator=X`` to set a different separator.
* Extracting a subset of columns::
$ ds2txt --select '*' ts,id.resp_h,id.resp_p --skip-all conn.log
1258790493.773208 192.168.1.255 137
1258790451.402091 192.168.1.255 138
1258790493.787448 192.168.1.255 138
1258790615.268111 192.168.1.255 137
1258790615.289842 192.168.1.255 138
[...]
* Filtering rows::
$ ds2txt --where '*' 'duration > 5 && id.resp_p > 1024' --skip-all conn.ds
1258790631.532888 V8mV5WLITu5 192.168.1.105 55890 239.255.255.250 1900 udp 15.004568 798 0 S0 F 0 D 6 966 0 0
1258792413.439596 tMcWVWQptvd 192.168.1.105 55890 239.255.255.250 1900 udp 15.004581 798 0 S0 F 0 D 6 966 0 0
1258794195.346127 cQwQMRdBrKa 192.168.1.105 55890 239.255.255.250 1900 udp 15.005071 798 0 S0 F 0 D 6 966 0 0
1258795977.253200 i8TEjhWd2W8 192.168.1.105 55890 239.255.255.250 1900 udp 15.004824 798 0 S0 F 0 D 6 966 0 0
1258797759.160217 MsLsBA8Ia49 192.168.1.105 55890 239.255.255.250 1900 udp 15.005078 798 0 S0 F 0 D 6 966 0 0
1258799541.068452 TsOxRWJRGwf 192.168.1.105 55890 239.255.255.250 1900 udp 15.004082 798 0 S0 F 0 D 6 966 0 0
[...]
* Calculate some statistics:
Mean/stddev/min/max over a column::
$ dsstatgroupby '*' basic duration from conn.ds
# Begin DSStatGroupByModule
# processed 2159 rows, where clause eliminated 0 rows
# count(*), mean(duration), stddev, min, max
2159, 42.7938, 1858.34, 0, 86370
[...]
Quantiles of total connection volume::
$ dsstatgroupby '*' quantile 'orig_bytes + resp_bytes' from conn.ds
[...]
2159 data points, mean 24616 +- 343295 [0,1.26615e+07]
quantiles about every 216 data points:
10%: 0, 124, 317, 348, 350, 350, 601, 798, 1469
tails: 90%: 1469, 95%: 7302, 99%: 242629, 99.5%: 1226262
[...]
The ``man`` pages for these tools show further options, and their
``-h`` option gives some more information (either can be a bit cryptic
unfortunately though).
Deficiencies
------------
Due to limitations of the DataSeries format, one cannot inspect its
files before they have been fully written. In other words, when using
DataSeries, it's currently not possible to inspect the live log
files inside the spool directory before they are rotated to their
final location. It seems that this could be fixed with some effort,
and we will work with the DataSeries development team on that if the
format gains traction among Bro users.
Likewise, we're considering writing custom command line tools for
interacting with DataSeries files, making that a bit more convenient
than what the standard utilities provide.


@ -1,89 +0,0 @@
=========================================
Indexed Logging Output with ElasticSearch
=========================================
.. rst-class:: opening
Bro's default ASCII log format is not exactly the most efficient
way for searching large volumes of data. ElasticSearch
is a new data storage technology for dealing with tons of data.
It's also a search engine built on top of Apache's Lucene
project. It scales very well, both for distributed indexing and
distributed searching.
.. contents::
Warning
-------
This writer plugin is still in testing and is not yet recommended for
production use! The approach to how logs are handled in the plugin is "fire
and forget" at this time, there is no error handling if the server fails to
respond successfully to the insertion request.
Installing ElasticSearch
------------------------
Download the latest version from: http://www.elasticsearch.org/download/.
Once extracted, start ElasticSearch with::
# ./bin/elasticsearch
For more detailed information, refer to the ElasticSearch installation
documentation: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/setup.html
Compiling Bro with ElasticSearch Support
----------------------------------------
First, ensure that you have libcurl installed, then run configure::
# ./configure
[...]
====================| Bro Build Summary |=====================
[...]
cURL: true
[...]
ElasticSearch: true
[...]
================================================================
Activating ElasticSearch
------------------------
The easiest way to enable ElasticSearch output is to load the
tuning/logs-to-elasticsearch.bro script. If you are using BroControl,
the following line in local.bro will enable it:
.. console::
@load tuning/logs-to-elasticsearch
With that, Bro will now write most of its logs into ElasticSearch in addition
to maintaining the Ascii logs like it would do by default. That script has
some tunable options for choosing which logs to send to ElasticSearch, refer
to the autogenerated script documentation for those options.
There is an interface named Brownian being written specifically to integrate
with the data that Bro outputs into ElasticSearch. It can be found here::
https://github.com/grigorescu/Brownian
Tuning
------
A common problem encountered with ElasticSearch is too many files being held
open. The ElasticSearch website has some suggestions on how to increase the
open file limit.
- http://www.elasticsearch.org/tutorials/too-many-open-files/
TODO
----
Lots.
- Perform multicast discovery for server.
- Better error detection.
- Better defaults (don't index loaded-plugins, for instance).


@ -38,7 +38,7 @@ Bro's logging interface is built around three main abstractions:
Writers
A writer defines the actual output format for the information
being logged. At the moment, Bro comes with only one type of
writer, which produces tab separated ASCII files. In the
future we will add further writers, like for binary output and
direct logging into a database.
@ -98,7 +98,7 @@ Note the fields that are set for the filter:
``include``
A set limiting the fields to the ones given. The names
correspond to those in the :bro:type:`Conn::Info` record, with
sub-records unrolled by concatenating fields (separated with
dots).

Using the code above, you will now get a new log file ``origs.log``
@ -155,7 +155,7 @@ that returns the desired path:
{
local filter: Log::Filter = [$name="conn-split", $path_func=split_log, $include=set("ts", "id.orig_h")];
Log::add_filter(Conn::LOG, filter);
}

Running this will now produce two files, ``local.log`` and
``remote.log``, with the corresponding entries. One could extend this
@ -263,7 +263,7 @@ specific destination exceeds a certain duration:
.. code:: bro

redef enum Notice::Type += {
## Indicates that a connection remained established longer
## than 5 minutes.
Long_Conn_Found
};
@ -271,8 +271,8 @@ specific destination exceeds a certain duration:
event Conn::log_conn(rec: Conn::Info)
{
if ( rec$duration > 5mins )
NOTICE([$note=Long_Conn_Found,
$msg=fmt("unusually long conn to %s", rec$id$resp_h),
$id=rec$id]);
}
@ -335,11 +335,11 @@ example for the ``Foo`` module:
# Define a hook event. By convention, this is called
# "log_<stream>".
global log_foo: event(rec: Info);
}

# This event should be handled at a higher priority so that when
# users modify your stream later and they do it at priority 0,
# their code runs after this.
event bro_init() &priority=5
{
@ -356,7 +356,7 @@ it easily accessible across event handlers:
foo: Info &optional;
}

Now you can use the :bro:id:`Log::write` method to output log records and
save the logged ``Foo::Info`` record into the connection record:

.. code:: bro
@ -380,11 +380,11 @@ uncommon to need to delete that data before the end of the connection.
Other Writers
-------------

Bro supports the following built-in output formats other than ASCII:

.. toctree::
   :maxdepth: 1

   logging-input-sqlite

Further formats are available as external plugins.


@ -64,8 +64,8 @@ expect that signature file in the same directory as the Bro script. The
default extension of the file name is ``.sig``, and Bro appends that
automatically when necessary.

Signature Language for Network Traffic
======================================

Let's look at the format of a signature more closely. Each individual
signature has the format ``signature <id> { <attributes> }``. ``<id>``
@ -286,6 +286,44 @@ two actions defined:
connection (``"http"``, ``"ftp"``, etc.). This is used by Bro's
dynamic protocol detection to activate analyzers on the fly.
Signature Language for File Content
===================================
The signature framework can also be used to identify MIME types of files
irrespective of the network protocol/connection over which the file is
transferred. A special type of signature can be written for this
purpose and will be used automatically by the :doc:`Files Framework
<file-analysis>` or by Bro scripts that use the :bro:see:`file_magic`
built-in function.
Conditions
----------
File signatures use a single type of content condition in the form of a
regular expression:
``file-magic /<regular expression>/``
This is analogous to the ``payload`` content condition for the network
traffic signature language described above. The difference is that
``payload`` signatures are applied to payloads of network connections,
but ``file-magic`` can be applied to arbitrary data; it does not
have to be tied to a network protocol/connection.
Actions
-------
Upon matching a chunk of data, file signatures use the following action
to get information about that data's MIME type:
``file-mime <string> [, <integer>]``
The arguments include the MIME type string associated with the file
magic regular expression and an optional "strength" as a signed integer.
Since multiple file magic signatures may match against a given chunk of
data, the strength value may be used to help choose a "winner". Higher
values are considered stronger.
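For example, a signature along the following lines (a sketch, not
necessarily one of the signatures Bro ships with) would report data
starting with the ZIP local file header magic as a ZIP archive; the
strength value of 80 is an arbitrary example::

    signature file-zip-example {
        file-magic /^PK\x03\x04/
        file-mime "application/zip", 80
    }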
Things to keep in mind when writing signatures
==============================================


@ -12,8 +12,10 @@ Introduction Section
   :maxdepth: 2

   intro/index.rst
   install/index.rst
   quickstart/index.rst
   configuration/index.rst

..
@ -29,7 +31,7 @@ Using Bro Section
   httpmonitor/index.rst
   broids/index.rst
   mimestats/index.rst
   scripting/index.rst

..
@ -39,12 +41,17 @@ Reference Section
.. toctree::
   :maxdepth: 2

   frameworks/index.rst
   script-reference/index.rst
   components/index.rst

Development
===========

.. toctree::
   :maxdepth: 2

   devel/plugins.rst

* :ref:`General Index <genindex>`
* :ref:`search`


@ -1,16 +1,21 @@
.. _upgrade-guidelines:

==============
How to Upgrade
==============

If you're doing an upgrade install (rather than a fresh install),
there are two suggested approaches: either install Bro using the same
installation prefix directory as before, or pick a new prefix and copy
local customizations over. Regardless of which approach you choose,
if you are using BroControl, then after upgrading Bro you will need to
run "broctl check" (to verify that your new configuration is OK)
and "broctl install" to complete the upgrade process.

In the following we summarize general guidelines for upgrading; see
the :ref:`release-notes` for version-specific information.
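For example, from the shell on a standalone installation (output
abridged)::

    # broctl check
    bro scripts are ok.
    # broctl install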
Reusing Previous Install Prefix
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~


@ -35,7 +35,7 @@ before you begin:
To build Bro from source, the following additional dependencies are required:

* CMake 2.6.3 or greater (http://www.cmake.org)
* Make
* C/C++ compiler
* SWIG (http://www.swig.org)
@ -80,7 +80,7 @@ that ``bash`` and ``python`` are in your ``PATH``):
Distributions of these dependencies can likely be obtained from your
preferred Mac OS X package management system (e.g. MacPorts_, Fink_,
or Homebrew_). Specifically for MacPorts, the ``cmake``, ``swig``,
and ``swig-python`` packages provide the required dependencies.

Optional Dependencies
@ -91,7 +91,6 @@ build time:
* LibGeoIP (for geolocating IP addresses)
* sendmail (enables Bro and BroControl to send mail)
* curl (used by a Bro script that implements active HTTP)
* gperftools (tcmalloc is used to improve memory and CPU usage)
* ipsumdump (for trace-summary; http://www.cs.ucla.edu/~kohler/ipsumdump)
@ -181,9 +180,14 @@ automatically. Finally, use ``make install-aux`` to install some of
the other programs that are in the ``aux/bro-aux`` directory.

OpenBSD users, please see our `FAQ
<http://www.bro.org/documentation/faq.html>`_ if you are having
problems installing Bro.

Finally, if you want to build the Bro documentation (not required, because
all of the documentation for the latest Bro release is available on the
Bro web site), there are instructions in ``doc/README`` in the source
distribution.

Configure the Run-Time Environment
==================================


@ -111,7 +111,9 @@ default, including:
such "crud" that is usually not worth following up on.

As you can see, some log files are specific to a particular protocol,
while others aggregate information across different types of activity.
For a complete list of log files and a description of their purpose,
see :doc:`Log Files <../script-reference/log-files>`.

.. _bro-cut:
@ -162,8 +164,8 @@ tools like ``awk`` allow you to indicate the log file as a command
line option, bro-cut only takes input through redirection such as
``|`` and ``<``. There are a couple of ways to direct log file data
into ``bro-cut``, each dependent upon the type of log file you're
processing. A caveat of its use, however, is that all of the
header lines must be present.

.. note::
@ -177,8 +179,8 @@ moving the current log file into a directory with format
``YYYY-MM-DD`` and gzip compressing the file with a file format that
includes the log file type and time range of the file. In the case of
processing a compressed log file you simply adjust your command line
tools to use the complementary ``z*`` versions of commands such as ``cat``
(``zcat``) or ``grep`` (``zgrep``).

Working with Timestamps
-----------------------
@ -250,44 +252,3 @@ protocol, it can have multiple ``GET``/``POST``/etc requests in a
stream and Bro is able to extract and track that information for you,
giving you an in-depth and structured view into HTTP traffic on your
network.
-----------------------
Common Log Files
-----------------------
As a monitoring tool, Bro records a detailed view of the traffic inspected
and the events generated in a series of relevant log files. These files can
later be reviewed for monitoring, auditing and troubleshooting purposes.
In this section we present a brief explanation of the most commonly used log
files generated by Bro including links to descriptions of some of the fields
for each log type.
+-----------------+---------------------------------------+------------------------------+
| Log File | Description | Field Descriptions |
+=================+=======================================+==============================+
| http.log | Shows all HTTP requests and replies | :bro:type:`HTTP::Info` |
+-----------------+---------------------------------------+------------------------------+
| ftp.log | Records FTP activity | :bro:type:`FTP::Info` |
+-----------------+---------------------------------------+------------------------------+
| ssl.log | Records SSL sessions including | :bro:type:`SSL::Info` |
| | certificates used | |
+-----------------+---------------------------------------+------------------------------+
| known_certs.log | Includes SSL certificates used | :bro:type:`Known::CertsInfo` |
+-----------------+---------------------------------------+------------------------------+
| smtp.log | Summarizes SMTP traffic on a network | :bro:type:`SMTP::Info` |
+-----------------+---------------------------------------+------------------------------+
| dns.log | Shows all DNS activity on a network | :bro:type:`DNS::Info` |
+-----------------+---------------------------------------+------------------------------+
| conn.log | Records all connections seen by Bro | :bro:type:`Conn::Info` |
+-----------------+---------------------------------------+------------------------------+
| dpd.log | Shows network activity on | :bro:type:`DPD::Info` |
| | non-standard ports | |
+-----------------+---------------------------------------+------------------------------+
| files.log | Records information about all files | :bro:type:`Files::Info` |
| | transmitted over the network | |
+-----------------+---------------------------------------+------------------------------+
| weird.log | Records unexpected protocol-level | :bro:type:`Weird::Info` |
| | activity | |
+-----------------+---------------------------------------+------------------------------+


@ -1,5 +1,5 @@
.. _FAQ: http://www.bro.org/documentation/faq.html

.. _quickstart:
@ -12,8 +12,10 @@ Quick Start Guide
Bro works on most modern, Unix-based systems and requires no custom
hardware. It can be downloaded in either pre-built binary package or
source code forms. See :ref:`installing-bro` for instructions on how to
install Bro.

In the examples below, ``$PREFIX`` is used to reference the Bro
installation root directory, which by default is ``/usr/local/bro`` if
you install from source.

Managing Bro with BroControl
@ -21,7 +23,10 @@ Managing Bro with BroControl
BroControl is an interactive shell for easily operating/managing Bro
installations on a single system or even across multiple systems in a
traffic-monitoring cluster. This section explains how to use BroControl
to manage a stand-alone Bro installation. For instructions on how to
configure a Bro cluster, see the :doc:`Cluster Configuration
<../configuration/index>` documentation.

A Minimal Starting Configuration
--------------------------------
@ -229,7 +234,7 @@ is valid before installing it and then restarting the Bro instance:
.. console::

[BroControl] > check
bro scripts are ok.
[BroControl] > install
removing old policies in /usr/local/bro/spool/policy/site ... done.
removing old policies in /usr/local/bro/spool/policy/auto ... done.
@ -245,15 +250,15 @@ is valid before installing it and then restarting the Bro instance:
Now that the SSL notice is ignored, let's look at how to send an email on
the SSH notice. The notice framework has a similar option called
``emailed_types``, but using that would generate email for all SSH servers and
we only want email for logins to certain ones. There is a ``policy`` hook
that is actually what is used to implement the simple functionality of
``ignored_types`` and ``emailed_types``, but it's extensible such that the
condition and action taken on notices can be user-defined.

In ``local.bro``, let's define a new ``policy`` hook handler body
that takes the email action for SSH logins only for a defined set of servers:

.. code:: bro
@ -271,9 +276,9 @@ that only takes the email action for SSH logins to a defined set of servers:
You'll just have to trust the syntax for now, but what we've done is
first declare our own variable to hold a set of watched addresses,
``watched_servers``; then added a hook handler body to the policy that will
generate an email whenever the notice type is an SSH login and the responding
host stored inside the ``Info`` record's connection field is in the set of
watched servers.

.. note:: Record field member access is done with the '$' character
@ -421,7 +426,7 @@ Running Bro Without Installing
For developers that wish to run Bro directly from the ``build/``
directory (i.e., without performing ``make install``), they will have
to first adjust ``BROPATH`` to look for scripts and
additional files inside the build directory. Sourcing either
``build/bro-path-dev.sh`` or ``build/bro-path-dev.csh`` as appropriate
for the current shell accomplishes this and also augments your


@ -0,0 +1,232 @@
Attributes
==========
The Bro scripting language supports the following attributes.
+-----------------------------+-----------------------------------------------+
| Name | Description |
+=============================+===============================================+
| :bro:attr:`&redef` |Redefine a global constant or extend a type. |
+-----------------------------+-----------------------------------------------+
| :bro:attr:`&priority` |Specify priority for event handler or hook. |
+-----------------------------+-----------------------------------------------+
| :bro:attr:`&log` |Mark a record field as to be written to a log. |
+-----------------------------+-----------------------------------------------+
| :bro:attr:`&optional` |Allow a record field value to be missing. |
+-----------------------------+-----------------------------------------------+
| :bro:attr:`&default` |Specify a default value. |
+-----------------------------+-----------------------------------------------+
| :bro:attr:`&add_func` |Specify a function to call for each "redef +=".|
+-----------------------------+-----------------------------------------------+
| :bro:attr:`&delete_func` |Same as "&add_func", except for "redef -=". |
+-----------------------------+-----------------------------------------------+
| :bro:attr:`&expire_func` |Specify a function to call when container |
| |element expires. |
+-----------------------------+-----------------------------------------------+
| :bro:attr:`&read_expire` |Specify a read timeout interval. |
+-----------------------------+-----------------------------------------------+
| :bro:attr:`&write_expire` |Specify a write timeout interval. |
+-----------------------------+-----------------------------------------------+
| :bro:attr:`&create_expire` |Specify a creation timeout interval. |
+-----------------------------+-----------------------------------------------+
| :bro:attr:`&synchronized` |Synchronize a variable across nodes. |
+-----------------------------+-----------------------------------------------+
| :bro:attr:`&persistent` |Make a variable persistent (written to disk). |
+-----------------------------+-----------------------------------------------+
| :bro:attr:`&rotate_interval`|Rotate a file after specified interval. |
+-----------------------------+-----------------------------------------------+
| :bro:attr:`&rotate_size` |Rotate a file after specified file size. |
+-----------------------------+-----------------------------------------------+
| :bro:attr:`&encrypt` |Encrypt a file when writing to disk. |
+-----------------------------+-----------------------------------------------+
| :bro:attr:`&raw_output` |Open file in raw mode (chars. are not escaped).|
+-----------------------------+-----------------------------------------------+
| :bro:attr:`&mergeable` |Prefer set union for synchronized state. |
+-----------------------------+-----------------------------------------------+
| :bro:attr:`&group` |Group event handlers to activate/deactivate. |
+-----------------------------+-----------------------------------------------+
| :bro:attr:`&error_handler` |Used internally for reporter framework events. |
+-----------------------------+-----------------------------------------------+
| :bro:attr:`&type_column` |Used by input framework for "port" type. |
+-----------------------------+-----------------------------------------------+
Here is a more detailed explanation of each attribute:
.. bro:attr:: &redef
Allows for redefinition of initial values of global objects declared as
constant.
In this example, the constant (assuming it is global) can be redefined
with a :bro:keyword:`redef` at some later point::
const clever = T &redef;
.. bro:attr:: &priority
Specifies the execution priority (as a signed integer) of a hook or
event handler. Higher values are executed before lower ones. The
default value is 0. Example::
event bro_init() &priority=10
{
print "high priority";
}
.. bro:attr:: &log
Writes a :bro:type:`record` field to the associated log stream.
.. bro:attr:: &optional
Allows a record field value to be missing (i.e., neither initialized nor
ever assigned a value).
In this example, the record could be instantiated with either
"myrec($a=127.0.0.1)" or "myrec($a=127.0.0.1, $b=80/tcp)"::
type myrec: record { a: addr; b: port &optional; };
The ``?$`` operator can be used to check if a record field has a value or
not (it returns a ``bool`` value of ``T`` if the field has a value,
and ``F`` if not).
.. bro:attr:: &default
Specifies a default value for a record field, container element, or a
function/hook/event parameter.
In this example, the record could be instantiated with either
"myrec($a=5, $c=3.14)" or "myrec($a=5, $b=53/udp, $c=3.14)"::
type myrec: record { a: count; b: port &default=80/tcp; c: double; };
In this example, the table will return the string ``"foo"`` for any
attempted access to a non-existing index::
global mytable: table[count] of string &default="foo";
When used with function/hook/event parameters, all of the parameters
with the "&default" attribute must come after all other parameters.
For example, the following function could be called either as "myfunc(5)"
or as "myfunc(5, 53/udp)"::
function myfunc(a: count, b: port &default=80/tcp)
{
print a, b;
}
.. bro:attr:: &add_func
Can be applied to an identifier with &redef to specify a function to
be called any time a "redef <id> += ..." declaration is parsed. The
function takes two arguments of the same type as the identifier, the first
being the old value of the variable and the second being the new
value given after the "+=" operator in the "redef" declaration. The
return value of the function will be the actual new value of the
variable after the "redef" declaration is parsed.
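For example, the following sketch (with made-up names) simply re-implements
the default union behavior for a redefinable set, to show where custom merge
logic would go::

    function merge_ports(old: set[port], new: set[port]): set[port]
        {
        # Combine the previous value with the newly redef'd one.
        local result = copy(old);
        for ( p in new )
            add result[p];
        return result;
        }

    const ok_ports: set[port] = { 80/tcp } &redef &add_func=merge_ports;

    # Elsewhere: redef ok_ports += { 443/tcp };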
.. bro:attr:: &delete_func
Same as :bro:attr:`&add_func`, except for :bro:keyword:`redef` declarations
that use the "-=" operator.
.. bro:attr:: &expire_func
Called right before a container element expires. The function's
first parameter is of the same type as the container and the second
parameter is of the same type as the container's index. The return
value is an :bro:type:`interval` indicating the amount of additional
time to wait before expiring the container element at the given
index (which will trigger another execution of this function).
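For example (a sketch with made-up names), an expiration callback for a
table with a creation timeout could look like this::

    function expired_entry(t: table[addr] of count, idx: addr): interval
        {
        print fmt("expiring entry for %s", idx);
        return 0 secs;  # expire now; return a positive interval to postpone
        }

    global conn_count: table[addr] of count
        &create_expire=5mins &expire_func=expired_entry;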
.. bro:attr:: &read_expire
Specifies a read expiration timeout for container elements. That is,
the element expires after the given amount of time since the last
time it has been read. Note that a write also counts as a read.
.. bro:attr:: &write_expire
Specifies a write expiration timeout for container elements. That
is, the element expires after the given amount of time since the
last time it has been written.
.. bro:attr:: &create_expire
Specifies a creation expiration timeout for container elements. That
is, the element expires after the given amount of time since it has
been inserted into the container, regardless of any reads or writes.
.. bro:attr:: &synchronized
Synchronizes variable accesses across nodes. The value of a
``&synchronized`` variable is automatically propagated to all peers
when it changes.
.. bro:attr:: &persistent
Makes a variable persistent, i.e., its value is written to disk (per
default at shutdown time).
.. bro:attr:: &rotate_interval
Rotates a file after a specified interval.
.. bro:attr:: &rotate_size
Rotates a file after it has reached a given size in bytes.
.. bro:attr:: &encrypt
Encrypts files right before writing them to disk.
.. bro:attr:: &raw_output
Opens a file in raw mode, i.e., non-ASCII characters are not
escaped.
.. bro:attr:: &mergeable
Prefers merging sets on assignment for synchronized state. This
attribute is used in conjunction with :bro:attr:`&synchronized`
container types: when the same container is updated at two peers
with different values, the propagation of the state causes a race
condition, where the last update succeeds. This can cause
inconsistencies and can be avoided by unifying the two sets, rather
than merely overwriting the old value.
.. bro:attr:: &group
Groups event handlers such that those in the same group can be
jointly activated or deactivated.
.. bro:attr:: &error_handler
Internally set on the events that are associated with the reporter
framework: :bro:id:`reporter_info`, :bro:id:`reporter_warning`, and
:bro:id:`reporter_error`. It prevents any handlers of those events
from being able to generate reporter messages that go through any of
those events (i.e., it prevents an infinite event recursion). Instead,
such nested reporter messages are output to stderr.
.. bro:attr:: &type_column
Used by the input framework. It can be used on columns of type
:bro:type:`port` (such a column only contains the port number) and
specifies the name of an additional column in
the input file which specifies the protocol of the port (tcp/udp/icmp).
In the following example, the input file would contain four columns
named "ip", "srcp", "proto", and "msg"::
type Idx: record {
ip: addr;
};
type Val: record {
srcp: port &type_column = "proto";
msg: string;
};


@ -0,0 +1,173 @@
Directives
==========
The Bro scripting language supports a number of directives that can
affect which scripts will be loaded or which lines in a script will be
executed. Directives are evaluated before script execution begins.
.. bro:keyword:: @DEBUG
TODO
.. bro:keyword:: @DIR
Expands to the directory pathname where the current script is located.
Example::
print "Directory:", @DIR;
.. bro:keyword:: @FILENAME
Expands to the filename of the current script.
Example::
print "File:", @FILENAME;
.. bro:keyword:: @load
Loads the specified Bro script, specified as the relative pathname
of the file (relative to one of the directories in Bro's file search path).
If the Bro script filename ends with ".bro", then you don't need to
specify the file extension. The filename cannot contain any whitespace.
In this example, Bro will try to load a script
"policy/misc/capture-loss.bro" by looking in each directory in the file
search path (the file search path can be changed by setting the BROPATH
environment variable)::
@load policy/misc/capture-loss
If you specify the name of a directory instead of a filename, then
Bro will try to load a file in that directory called "__load__.bro"
(presumably that file will contain additional "@load" directives).
In this example, Bro will try to load a file "tuning/defaults/__load__.bro"
by looking in each directory in the file search path::
@load tuning/defaults
The purpose of this directive is to ensure that all script dependencies
are satisfied, and to avoid having to list every needed Bro script
on the command-line. Bro keeps track of which scripts have been
loaded, so it is not an error to load a script more than once (once
a script has been loaded, any subsequent "@load" directives
for that script are ignored).
.. bro:keyword:: @load-sigs
This works similarly to "@load", except that in this case the filename
represents a signature file (not a Bro script). If the signature filename
ends with ".sig", then you don't need to specify the file extension
in the "@load-sigs" directive. The filename cannot contain any
whitespace.
In this example, Bro will try to load a signature file
"base/protocols/ssl/dpd.sig"::
@load-sigs base/protocols/ssl/dpd
The format for a signature file is explained in the documentation for the
`Signature Framework <../frameworks/signatures.html>`_.
.. bro:keyword:: @unload
This specifies a Bro script that we don't want to load (so a subsequent
attempt to load the specified script will be skipped). However,
if the specified script has already been loaded, then this directive
has no effect.
In the following example, if the "policy/misc/capture-loss.bro" script
has not been loaded yet, then Bro will not load it::
@unload policy/misc/capture-loss
.. bro:keyword:: @prefixes
Specifies a filename prefix to use when looking for script files
to load automatically. The prefix cannot contain any whitespace.
In the following example, the prefix "cluster" is used and all prefixes
that were previously specified are not used::
@prefixes = cluster
In the following example, the prefix "cluster-manager" is used in
addition to any previously-specified prefixes::
@prefixes += cluster-manager
The way this works is that after Bro parses all script files, it takes
the absolute path of each loaded script and removes the portion of the
directory path that is in Bro's file search path. It then replaces each
"/" character with a period "." and prepends the prefix (specified in the
"@prefixes" directive) followed by a period. The resulting filename is
searched for in each directory in Bro's file search path. If a matching
file is found, then the file is automatically loaded.
For example, if a script called "local.bro" has been loaded, and a prefix
of "test" was specified, then Bro will look for a file named
"test.local.bro" in each directory of Bro's file search path.
An alternative way to specify prefixes is to use the "-p" Bro
command-line option.
.. bro:keyword:: @if
The specified expression must evaluate to type :bro:type:`bool`. If the
value is true, then the following script lines (up to the next "@else"
or "@endif") are available to be executed.
Example::
@if ( ver == 2 )
print "version 2 detected";
@endif
.. bro:keyword:: @ifdef
This works like "@if", except that the result is true if the specified
identifier is defined.
Example::
@ifdef ( pi )
print "pi is defined";
@endif
.. bro:keyword:: @ifndef
This works exactly like "@ifdef", except that the result is true if the
specified identifier is not defined.
Example::
@ifndef ( pi )
print "pi is not defined";
@endif
.. bro:keyword:: @else
This directive is optional after an "@if", "@ifdef", or
"@ifndef". If present, it provides an else clause.
Example::
@ifdef ( pi )
print "pi is defined";
@else
print "pi is not defined";
@endif
.. bro:keyword:: @endif
This directive is required to terminate each "@if", "@ifdef", or
"@ifndef".


@ -5,10 +5,17 @@ Script Reference
.. toctree::
   :maxdepth: 1

   operators
   types
   attributes
   statements
   directives
   log-files
   notices
   proto-analyzers
   file-analyzers
   builtins
   packages
   scripts
   Broxygen Example Script </scripts/broxygen/example.bro>


@ -0,0 +1,148 @@
=========
Log Files
=========
Listed below are the log files generated by Bro, including a brief description
of the log file and links to descriptions of the fields for each log
type.
Network Protocols
-----------------
+----------------------------+---------------------------------------+---------------------------------+
| Log File | Description | Field Descriptions |
+============================+=======================================+=================================+
| conn.log | TCP/UDP/ICMP connections | :bro:type:`Conn::Info` |
+----------------------------+---------------------------------------+---------------------------------+
| dhcp.log | DHCP leases | :bro:type:`DHCP::Info` |
+----------------------------+---------------------------------------+---------------------------------+
| dnp3.log | DNP3 requests and replies | :bro:type:`DNP3::Info` |
+----------------------------+---------------------------------------+---------------------------------+
| dns.log | DNS activity | :bro:type:`DNS::Info` |
+----------------------------+---------------------------------------+---------------------------------+
| ftp.log | FTP activity | :bro:type:`FTP::Info` |
+----------------------------+---------------------------------------+---------------------------------+
| http.log | HTTP requests and replies | :bro:type:`HTTP::Info` |
+----------------------------+---------------------------------------+---------------------------------+
| irc.log | IRC commands and responses | :bro:type:`IRC::Info` |
+----------------------------+---------------------------------------+---------------------------------+
| modbus.log | Modbus commands and responses | :bro:type:`Modbus::Info` |
+----------------------------+---------------------------------------+---------------------------------+
| modbus_register_change.log | Tracks changes to Modbus holding | :bro:type:`Modbus::MemmapInfo` |
| | registers | |
+----------------------------+---------------------------------------+---------------------------------+
| radius.log | RADIUS authentication attempts | :bro:type:`RADIUS::Info` |
+----------------------------+---------------------------------------+---------------------------------+
| smtp.log | SMTP transactions | :bro:type:`SMTP::Info` |
+----------------------------+---------------------------------------+---------------------------------+
| snmp.log | SNMP messages | :bro:type:`SNMP::Info` |
+----------------------------+---------------------------------------+---------------------------------+
| socks.log | SOCKS proxy requests | :bro:type:`SOCKS::Info` |
+----------------------------+---------------------------------------+---------------------------------+
| ssh.log | SSH connections | :bro:type:`SSH::Info` |
+----------------------------+---------------------------------------+---------------------------------+
| ssl.log | SSL/TLS handshake info | :bro:type:`SSL::Info` |
+----------------------------+---------------------------------------+---------------------------------+
| syslog.log | Syslog messages | :bro:type:`Syslog::Info` |
+----------------------------+---------------------------------------+---------------------------------+
| tunnel.log | Tunneling protocol events | :bro:type:`Tunnel::Info` |
+----------------------------+---------------------------------------+---------------------------------+
Files
-----
+----------------------------+---------------------------------------+---------------------------------+
| Log File | Description | Field Descriptions |
+============================+=======================================+=================================+
| files.log | File analysis results | :bro:type:`Files::Info` |
+----------------------------+---------------------------------------+---------------------------------+
| x509.log | X.509 certificate info | :bro:type:`X509::Info` |
+----------------------------+---------------------------------------+---------------------------------+
Detection
---------
+----------------------------+---------------------------------------+---------------------------------+
| Log File | Description | Field Descriptions |
+============================+=======================================+=================================+
| intel.log | Intelligence data matches | :bro:type:`Intel::Info` |
+----------------------------+---------------------------------------+---------------------------------+
| notice.log | Bro notices | :bro:type:`Notice::Info` |
+----------------------------+---------------------------------------+---------------------------------+
| notice_alarm.log | The alarm stream | :bro:enum:`Notice::ACTION_ALARM`|
+----------------------------+---------------------------------------+---------------------------------+
| signatures.log | Signature matches | :bro:type:`Signatures::Info` |
+----------------------------+---------------------------------------+---------------------------------+
| traceroute.log | Traceroute detection | :bro:type:`Traceroute::Info` |
+----------------------------+---------------------------------------+---------------------------------+
Network Observations
--------------------
+----------------------------+---------------------------------------+---------------------------------+
| Log File | Description | Field Descriptions |
+============================+=======================================+=================================+
| app_stats.log | Web app usage statistics | :bro:type:`AppStats::Info` |
+----------------------------+---------------------------------------+---------------------------------+
| known_certs.log | SSL certificates | :bro:type:`Known::CertsInfo` |
+----------------------------+---------------------------------------+---------------------------------+
| known_devices.log | MAC addresses of devices on the | :bro:type:`Known::DevicesInfo` |
| | network | |
+----------------------------+---------------------------------------+---------------------------------+
| known_hosts.log | Hosts that have completed TCP | :bro:type:`Known::HostsInfo` |
| | handshakes | |
+----------------------------+---------------------------------------+---------------------------------+
| known_modbus.log | Modbus masters and slaves | :bro:type:`Known::ModbusInfo` |
+----------------------------+---------------------------------------+---------------------------------+
| known_services.log | Services running on hosts | :bro:type:`Known::ServicesInfo` |
+----------------------------+---------------------------------------+---------------------------------+
| software.log | Software being used on the network | :bro:type:`Software::Info` |
+----------------------------+---------------------------------------+---------------------------------+
Miscellaneous
-------------
+----------------------------+---------------------------------------+---------------------------------+
| Log File | Description | Field Descriptions |
+============================+=======================================+=================================+
| barnyard2.log | Alerts received from Barnyard2 | :bro:type:`Barnyard2::Info` |
+----------------------------+---------------------------------------+---------------------------------+
| dpd.log | Dynamic protocol detection failures | :bro:type:`DPD::Info` |
+----------------------------+---------------------------------------+---------------------------------+
| unified2.log | Interprets Snort's unified output | :bro:type:`Unified2::Info` |
+----------------------------+---------------------------------------+---------------------------------+
| weird.log | Unexpected network-level activity | :bro:type:`Weird::Info` |
+----------------------------+---------------------------------------+---------------------------------+
Bro Diagnostics
---------------
+----------------------------+---------------------------------------+---------------------------------+
| Log File | Description | Field Descriptions |
+============================+=======================================+=================================+
| capture_loss.log | Packet loss rate | :bro:type:`CaptureLoss::Info` |
+----------------------------+---------------------------------------+---------------------------------+
| cluster.log | Bro cluster messages | :bro:type:`Cluster::Info` |
+----------------------------+---------------------------------------+---------------------------------+
| communication.log | Communication events between Bro or | :bro:type:`Communication::Info` |
| | Broccoli instances | |
+----------------------------+---------------------------------------+---------------------------------+
| loaded_scripts.log | Shows all scripts loaded by Bro | :bro:type:`LoadedScripts::Info` |
+----------------------------+---------------------------------------+---------------------------------+
| packet_filter.log | List packet filters that were applied | :bro:type:`PacketFilter::Info` |
+----------------------------+---------------------------------------+---------------------------------+
| prof.log | Profiling statistics (to create this | N/A |
| | log, load policy/misc/profiling.bro) | |
+----------------------------+---------------------------------------+---------------------------------+
| reporter.log | Internal error/warning/info messages | :bro:type:`Reporter::Info` |
+----------------------------+---------------------------------------+---------------------------------+
| stats.log | Memory/event/packet/lag statistics | :bro:type:`Stats::Info` |
+----------------------------+---------------------------------------+---------------------------------+
| stderr.log | Captures standard error when Bro is | N/A |
| | started from BroControl | |
+----------------------------+---------------------------------------+---------------------------------+
| stdout.log | Captures standard output when Bro is | N/A |
| | started from BroControl | |
+----------------------------+---------------------------------------+---------------------------------+
@ -0,0 +1,191 @@
Operators
=========
The Bro scripting language supports the following operators. Note that
each data type only supports a subset of these operators. For more
details, see the documentation about the `data types <types.html>`_.
Relational operators
--------------------
The relational operators evaluate to type :bro:type:`bool`.
+------------------------------+--------------+
| Name | Syntax |
+==============================+==============+
| Equality | *a* == *b* |
+------------------------------+--------------+
| Inequality | *a* != *b* |
+------------------------------+--------------+
| Less than | *a* < *b* |
+------------------------------+--------------+
| Less than or equal | *a* <= *b* |
+------------------------------+--------------+
| Greater than | *a* > *b* |
+------------------------------+--------------+
| Greater than or equal | *a* >= *b* |
+------------------------------+--------------+
Logical operators
-----------------
The logical operators require operands of type :bro:type:`bool`, and
evaluate to type :bro:type:`bool`.
+------------------------------+--------------+
| Name | Syntax |
+==============================+==============+
| Logical AND | *a* && *b* |
+------------------------------+--------------+
| Logical OR | *a* \|\| *b* |
+------------------------------+--------------+
| Logical NOT | ! *a* |
+------------------------------+--------------+
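As a brief illustration (the variable names are made up for this sketch), relational
results can be combined with the logical operators, since both evaluate to
:bro:type:`bool`::
local x = 5;
local y = 10;
local in_range = x > 0 && x < y;   # T
local not_equal = !(x == y);       # T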
Arithmetic operators
--------------------
+------------------------------+-------------+-------------------------------+
| Name | Syntax | Notes |
+==============================+=============+===============================+
| Addition | *a* + *b* | For :bro:type:`string` |
| | | operands, this performs |
| | | string concatenation. |
+------------------------------+-------------+-------------------------------+
| Subtraction | *a* - *b* | |
+------------------------------+-------------+-------------------------------+
| Multiplication | *a* \* *b* | |
+------------------------------+-------------+-------------------------------+
| Division | *a* / *b* | For :bro:type:`int` or |
| | | :bro:type:`count` operands, |
| | | the fractional part of the |
| | | result is dropped. |
+------------------------------+-------------+-------------------------------+
| Modulo | *a* % *b* | Operand types cannot be |
| | | "double". |
+------------------------------+-------------+-------------------------------+
| Unary plus | \+ *a* | |
+------------------------------+-------------+-------------------------------+
| Unary minus | \- *a* | |
+------------------------------+-------------+-------------------------------+
| Pre-increment | ++ *a* | Operand type cannot be |
| | | "double". |
+------------------------------+-------------+-------------------------------+
| Pre-decrement | ``--`` *a* | Operand type cannot be |
| | | "double". |
+------------------------------+-------------+-------------------------------+
| Absolute value | \| *a* \| | If operand is |
| | | :bro:type:`string`, |
| | | :bro:type:`set`, |
| | | :bro:type:`table`, or |
| | | :bro:type:`vector`, this |
| | | evaluates to number |
| | | of elements. |
+------------------------------+-------------+-------------------------------+
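For example, a short sketch (the values are illustrative only) of integer division,
modulo, and the absolute-value syntax::
local total = 7;
local groups = 2;
print total / groups;   # 3 (fractional part dropped for count operands)
print total % groups;   # 1
print |-3.14|;          # 3.14
print |set(1, 2, 3)|;   # 3 (number of elements)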
Assignment operators
--------------------
The assignment operators evaluate to the result of the assignment.
+------------------------------+-------------+
| Name | Syntax |
+==============================+=============+
| Assignment | *a* = *b* |
+------------------------------+-------------+
| Addition assignment | *a* += *b* |
+------------------------------+-------------+
| Subtraction assignment | *a* -= *b* |
+------------------------------+-------------+
Record field operators
----------------------
The record field operators take a :bro:type:`record` as the first operand,
and a field name as the second operand. For both operators, the specified
field name must be in the declaration of the record type.
+------------------------------+-------------+-------------------------------+
| Name | Syntax | Notes |
+==============================+=============+===============================+
| Field access | *a* $ *b* | |
+------------------------------+-------------+-------------------------------+
| Field value existence test | *a* ?$ *b* | Evaluates to type |
| | | :bro:type:`bool`. |
| | | True if the specified field |
| | | has been assigned a value, or |
| | | false if not. |
+------------------------------+-------------+-------------------------------+
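A short sketch (the record type and field names are hypothetical) showing both
operators::
type Pair: record {
a: count;
b: string &optional;
};
local p = Pair($a = 1);
print p$a;      # field access
if ( p ?$ b )   # true only if the optional field was assigned a value
print p$b;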
Other operators
---------------
+--------------------------------+-------------------+------------------------+
| Name | Syntax | Notes |
+================================+===================+========================+
| Membership test | *a* in *b* |Evaluates to type |
| | |:bro:type:`bool`. Do not|
| | |confuse this use of "in"|
| | |with that used in a |
| | |:bro:keyword:`for` |
| | |statement. |
+--------------------------------+-------------------+------------------------+
| Non-membership test | *a* !in *b* |This is the logical NOT |
| | |of the "in" operator. |
| | |For example: "a !in b" |
| | |is equivalent to |
| | |"!(a in b)". |
+--------------------------------+-------------------+------------------------+
| Table or vector element access | *a* [ *b* ] |This operator can also |
| | |be used with a |
| | |:bro:type:`set`, but |
| | |only with the |
| | |:bro:keyword:`add` or |
| | |:bro:keyword:`delete` |
| | |statement. |
+--------------------------------+-------------------+------------------------+
| Substring extraction | *a* [ *b* : *c* ] |See the |
| | |:bro:type:`string` type |
| | |for more details. |
+--------------------------------+-------------------+------------------------+
| Create a deep copy | copy ( *a* ) |This is relevant only |
| | |for data types that are |
| | |assigned by reference, |
| | |such as |
| | |:bro:type:`vector`, |
| | |:bro:type:`set`, |
| | |:bro:type:`table`, |
| | |and :bro:type:`record`. |
+--------------------------------+-------------------+------------------------+
| Module namespace access | *a* \:\: *b* |The first operand is the|
| | |module name, and the |
| | |second operand is an |
| | |identifier that refers |
| | |to a global variable, |
| | |enumeration constant, or|
| | |user-defined type that |
| | |was exported from the |
| | |module. |
+--------------------------------+-------------------+------------------------+
| Conditional | *a* ? *b* : *c* |The first operand must |
| | |evaluate to type |
| | |:bro:type:`bool`. |
| | |If true, then the |
| | |second expression is |
| | |evaluated and is the |
| | |result of the entire |
| | |expression. Otherwise, |
| | |the third expression is |
| | |evaluated and is the |
| | |result of the entire |
| | |expression. The types of|
| | |the second and third |
| | |operands must be |
| | |compatible. |
+--------------------------------+-------------------+------------------------+
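The following sketch (the values are illustrative only) combines several of these
operators::
local s = "foobar";
local t = table(["a"] = 1, ["b"] = 2);
print "a" in t;                     # T (membership test)
print s[0:3];                       # "foo" (substring extraction)
local u = copy(t);                  # deep copy; changes to u do not affect t
print |s| > 3 ? "long" : "short";   # conditional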
@ -0,0 +1,602 @@
Declarations and Statements
===========================
The Bro scripting language supports the following declarations and
statements.
Declarations
~~~~~~~~~~~~
+----------------------------+-----------------------------+
| Name | Description |
+============================+=============================+
| :bro:keyword:`module` | Change the current module |
+----------------------------+-----------------------------+
| :bro:keyword:`export` | Export identifiers from the |
| | current module |
+----------------------------+-----------------------------+
| :bro:keyword:`global` | Declare a global variable |
+----------------------------+-----------------------------+
| :bro:keyword:`const` | Declare a constant |
+----------------------------+-----------------------------+
| :bro:keyword:`type` | Declare a user-defined type |
+----------------------------+-----------------------------+
| :bro:keyword:`redef` | Redefine a global value or |
| | extend a user-defined type |
+----------------------------+-----------------------------+
| `function/event/hook`_ | Declare a function, event |
| | handler, or hook |
+----------------------------+-----------------------------+
Statements
~~~~~~~~~~
+----------------------------+------------------------+
| Name | Description |
+============================+========================+
| :bro:keyword:`local` | Declare a local |
| | variable |
+----------------------------+------------------------+
| :bro:keyword:`add`, | Add or delete |
| :bro:keyword:`delete` | elements |
+----------------------------+------------------------+
| :bro:keyword:`print` | Print to stdout or a |
| | file |
+----------------------------+------------------------+
| :bro:keyword:`for`, | Loop over each |
| :bro:keyword:`next`, | element in a container |
| :bro:keyword:`break` | object |
+----------------------------+------------------------+
| :bro:keyword:`if` | Evaluate boolean |
| | expression and if true,|
| | execute a statement |
+----------------------------+------------------------+
| :bro:keyword:`switch`, | Evaluate expression |
| :bro:keyword:`break`, | and execute statement |
| :bro:keyword:`fallthrough` | with a matching value |
+----------------------------+------------------------+
| :bro:keyword:`when` | Asynchronous execution |
+----------------------------+------------------------+
| :bro:keyword:`event`, | Invoke or schedule |
| :bro:keyword:`schedule` | an event handler |
+----------------------------+------------------------+
| :bro:keyword:`return` | Return from function, |
| | hook, or event handler |
+----------------------------+------------------------+
Declarations
------------
The following global declarations cannot occur within a function, hook, or
event handler. Also, these declarations cannot appear after any statements
that are outside of a function, hook, or event handler.
.. bro:keyword:: module
The "module" keyword is used to change the current module. This
affects the scope of any subsequently declared global identifiers.
Example::
module mymodule;
If a global identifier is declared after a "module" declaration,
then its scope ends at the end of the current Bro script or at the
next "module" declaration, whichever comes first. However, if a
global identifier is declared after a "module" declaration, but inside
an :bro:keyword:`export` block, then its scope ends at the end of the
last loaded Bro script, but it must be referenced using the namespace
operator (``::``) in other modules.
There can be any number of "module" declarations in a Bro script.
The same "module" declaration can appear in any number of different
Bro scripts.
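For instance, a sketch with two hypothetical scripts: the first declares an
identifier inside an :bro:keyword:`export` block, and the second refers to it
with the namespace operator::
# First script:
module MyModule;
export {
const greeting = "hello" &redef;
}
# Second script (any module):
print MyModule::greeting;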
.. bro:keyword:: export
An "export" block contains one or more declarations
(no statements are allowed in an "export" block) that the current
module is exporting. This enables these global identifiers to be visible
in other modules (but not prior to their declaration) via the namespace
operator (``::``). See the :bro:keyword:`module` keyword for a more
detailed explanation.
Example::
export {
redef enum Log::ID += { LOG };
type Info: record {
ts: time &log;
uid: string &log;
};
const conntime = 30sec &redef;
}
Note that the braces in an "export" block are always required
(they do not indicate a compound statement). Also, no semicolon is
needed to terminate an "export" block.
.. bro:keyword:: global
Variables declared with the "global" keyword will be global.
If a type is not specified, then an initializer is required so that
the type can be inferred. Likewise, if an initializer is not supplied,
then the type must be specified. Example::
global pi = 3.14;
global hosts: set[addr];
global ciphers: table[string] of string = table();
Variable declarations outside of any function, hook, or event handler are
required to use this keyword (unless they are declared with the
:bro:keyword:`const` keyword). Definitions of functions, hooks, and
event handlers are not allowed to use the "global"
keyword (they already have global scope); the one exception is a function
declaration without a function body, which does use the "global" keyword.
The scope of a global variable begins where the declaration is located,
and extends through all remaining Bro scripts that are loaded (however,
see the :bro:keyword:`module` keyword for an explanation of how modules
change the visibility of global identifiers).
.. bro:keyword:: const
A variable declared with the "const" keyword will be constant.
Variables declared as constant are required to be initialized at the
time of declaration. Example::
const pi = 3.14;
const ssh_port: port = 22/tcp;
The value of a constant cannot be changed later (the only
exception is a global variable with the :bro:attr:`&redef`
attribute, whose value can be changed only with a :bro:keyword:`redef`).
The scope of a constant is local if the declaration is in a
function, hook, or event handler, and global otherwise.
Note that the "const" keyword cannot be used with either the "local"
or "global" keywords (i.e., "const" replaces "local" and "global").
.. bro:keyword:: type
The "type" keyword is used to declare a user-defined type. The name
of this new type has global scope and can be used anywhere a built-in
type name can occur.
The "type" keyword is most commonly used when defining a
:bro:type:`record` or an :bro:type:`enum`, but is also useful when
dealing with more complex types.
Example::
type mytype: table[count] of table[addr, port] of string;
global myvar: mytype;
.. bro:keyword:: redef
There are three ways that "redef" can be used: to change the value of
a global variable, to extend a record type or enum type, or to specify
a new event handler body that replaces all those that were previously
defined.
If you're using "redef" to change a global variable (defined using either
:bro:keyword:`const` or :bro:keyword:`global`), then the variable that you
want to change must have the :bro:attr:`&redef` attribute. If the variable
you're changing is a table, set, or pattern, you can use ``+=`` to add
new elements, or you can use ``=`` to specify a new value (all previous
contents of the object are removed). If the variable you're changing is a
set or table, then you can use the ``-=`` operator to remove the
specified elements (nothing happens for specified elements that don't
exist). If the variable you are changing is not a table, set, or pattern,
then you must use the ``=`` operator.
Examples::
redef pi = 3.14;
If you're using "redef" to extend a record or enum, then you must
use the ``+=`` assignment operator.
For an enum, you can add more enumeration constants, and for a record
you can add more record fields (however, each record field in the "redef"
must have either the :bro:attr:`&optional` or :bro:attr:`&default`
attribute).
Examples::
redef enum color += { Blue, Red };
redef record MyRecord += { n2:int &optional; s2:string &optional; };
If you're using "redef" to specify a new event handler body that
replaces all those that were previously defined (i.e., any subsequently
defined event handler body will not be affected by this "redef"), then
the syntax is the same as a regular event handler definition except for
the presence of the "redef" keyword.
Example::
redef event myevent(s:string) { print "Redefined", s; }
.. _function/event/hook:
**function/event/hook**
For details on how to declare a :bro:type:`function`,
:bro:type:`event` handler, or :bro:type:`hook`,
see the documentation for those types.
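As a minimal sketch (the names here are made up for illustration), the three
declaration forms look like this::
function plus_one(n: count): count
{
return n + 1;
}
event my_event(msg: string)
{
print msg;
}
hook my_policy(n: count)
{
if ( n == 0 )
break;
}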
Statements
----------
Each statement in a Bro script must be terminated with a semicolon (with a
few exceptions noted below). An individual statement can span multiple
lines.
All statements (except those contained within a function, hook, or event
handler) must appear after all global declarations.
Here are the statements that the Bro scripting language supports.
.. bro:keyword:: add
The "add" statement is used to add an element to a :bro:type:`set`.
Nothing happens if the specified element already exists in the set.
Example::
local myset: set[string];
add myset["test"];
.. bro:keyword:: break
The "break" statement is used to break out of a :bro:keyword:`switch` or
:bro:keyword:`for` statement.
.. bro:keyword:: delete
The "delete" statement is used to remove an element from a
:bro:type:`set` or :bro:type:`table`. Nothing happens if the
specified element does not exist in the set or table.
Example::
local myset = set("this", "test");
local mytable = table(["key1"] = 80/tcp, ["key2"] = 53/udp);
delete myset["test"];
delete mytable["key1"];
.. bro:keyword:: event
The "event" statement immediately queues invocation of an event handler.
Example::
event myevent("test", 5);
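A slightly fuller sketch (the event name and parameters are hypothetical) pairs
the statement with a matching event handler::
event myevent(s: string, n: count)
{
print "myevent", s, n;
}
event bro_init()
{
event myevent("test", 5);
}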
.. bro:keyword:: fallthrough
The "fallthrough" statement can be used as the last statement in a
"case" block to indicate that execution should continue into the
next "case" or "default" label.
For an example, see the :bro:keyword:`switch` statement.
.. bro:keyword:: for
A "for" loop iterates over each element in a string, set, vector, or
table and executes a statement for each iteration.
For each iteration of the loop, a loop variable will be assigned to an
element if the expression evaluates to a string or set, or an index if
the expression evaluates to a vector or table. Then the statement
is executed. However, the statement will not be executed if the expression
evaluates to an object with no elements.
If the expression is a table or a set with more than one index, then the
loop variable must be specified as a comma-separated list of different
loop variables (one for each index), enclosed in brackets.
A :bro:keyword:`break` statement can be used at any time to immediately
terminate the "for" loop, and a :bro:keyword:`next` statement can be
used to skip to the next loop iteration.
Note that the loop variable in a "for" statement is not allowed to be
a global variable, and it does not need to be declared prior to the "for"
statement. The type will be inferred from the elements of the
expression.
Example::
local myset = set(80/tcp, 81/tcp);
local mytable = table([10.0.0.1, 80/tcp]="s1", [10.0.0.2, 81/tcp]="s2");
for (p in myset)
print p;
for ([i,j] in mytable) {
if (mytable[i,j] == "done")
break;
if (mytable[i,j] == "skip")
next;
print i,j;
}
.. bro:keyword:: if
Evaluates a given expression, which must yield a :bro:type:`bool` value.
If true, then a specified statement is executed. If false, then
the statement is not executed. Example::
if ( x == 2 ) print "x is 2";
However, if the expression evaluates to false and if an "else" is
provided, then the statement following the "else" is executed. Example::
if ( x == 2 )
print "x is 2";
else
print "x is not 2";
.. bro:keyword:: local
A variable declared with the "local" keyword will be local. If a type
is not specified, then an initializer is required so that the type can
be inferred. Likewise, if an initializer is not supplied, then the
type must be specified.
Examples::
local x1 = 5.7;
local x2: double;
local x3: double = 5.7;
Variable declarations inside a function, hook, or event handler are
required to use this keyword (the only two exceptions are variables
declared with :bro:keyword:`const`, and variables implicitly declared in a
:bro:keyword:`for` statement).
The scope of a local variable starts at the location where it is declared
and persists to the end of the function, hook,
or event handler in which it is declared (this is true even if the
local variable was declared within a `compound statement`_ or is the loop
variable in a "for" statement).
.. bro:keyword:: next
The "next" statement can only appear within a :bro:keyword:`for` loop.
It causes execution to skip to the next iteration.
For an example, see the :bro:keyword:`for` statement.
.. bro:keyword:: print
The "print" statement takes a comma-separated list of one or more
expressions. Each expression in the list is evaluated and then converted
to a string. Then each string is printed, with each string separated by
a comma in the output.
Examples::
print 3.14;
print "Results", x, y;
By default, the "print" statement writes to the standard
output (stdout). However, if the first expression is of type
:bro:type:`file`, then "print" writes to that file.
If a string contains non-printable characters (i.e., byte values that are
not in the range 32 - 126), then the "print" statement converts each
non-printable character to an escape sequence before it is printed.
For more control over how the strings are formatted, see the :bro:id:`fmt`
function.
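For instance, a small sketch (the values are illustrative) using :bro:id:`fmt`
with "print"::
local host = 10.0.0.1;
local n = 3;
print fmt("%s has %d connections", host, n);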
.. bro:keyword:: return
The "return" statement immediately exits the current function, hook, or
event handler. For a function, the specified expression (if any) is
evaluated and returned. A "return" statement in a hook or event handler
cannot return a value because event handlers and hooks do not have
return types.
Examples::
function my_func(): string
{
return "done";
}
event my_event(n: count)
{
if ( n == 0 ) return;
print n;
}
There is a special form of the "return" statement that is only allowed
in functions. Syntactically, it looks like a :bro:keyword:`when` statement
immediately preceded by the "return" keyword. This form of the "return"
statement is used to specify a function that delays its result (such a
function can only be called in the expression of a :bro:keyword:`when`
statement). The function returns at the time the "when"
statement's condition becomes true, and the function returns the value
that the "when" statement's body returns (or if the condition does
not become true within the specified timeout interval, then the function
returns the value that the "timeout" block returns).
Example::
global X: table[string] of count;
function a() : count
{
# This delays until condition becomes true.
return when ( "a" in X )
{
return X["a"];
}
timeout 30 sec
{
return 0;
}
}
event bro_init()
{
# Installs a trigger which fires if a() returns 42.
when ( a() == 42 )
print "expected result";
print "Waiting for a() to return...";
X["a"] = 42;
}
.. bro:keyword:: schedule
The "schedule" statement is used to raise a specified event with
specified parameters at a later time specified as an :bro:type:`interval`.
Example::
schedule 30sec { myevent(x, y, z) };
Note that the braces are always required (they do not indicate a
`compound statement`_).
Note that "schedule" is actually an expression that returns a value
of type "timer", but in practice the return value is not used.
.. bro:keyword:: switch
A "switch" statement evaluates a given expression and jumps to
the first "case" label which contains a matching value (the result of the
expression must be type-compatible with all of the values in all of the
"case" labels). If there is no matching value, then execution jumps to
the "default" label instead, and if there is no "default" label then
execution jumps out of the "switch" block.
Here is an example (assuming that "get_day_of_week" is a
function that returns a string)::
switch get_day_of_week()
{
case "Sa", "Su":
print "weekend";
fallthrough;
case "Mo", "Tu", "We", "Th", "Fr":
print "valid result";
break;
default:
print "invalid result";
break;
}
A "switch" block can have any number of "case" labels, and one
optional "default" label.
A "case" label can have a comma-separated list of
more than one value. A value in a "case" label can be an expression,
but it must be a constant expression (i.e., the expression can consist
only of constants).
Each "case" and the "default" block must
end with either a :bro:keyword:`break`, :bro:keyword:`fallthrough`, or
:bro:keyword:`return` statement (although "return" is allowed only
if the "switch" statement is inside a function, hook, or event handler).
If a "case" (or "default") block contain more than one statement, then
there is no need to wrap them in braces.
Note that the braces in a "switch" statement are always required (these
do not indicate the presence of a `compound statement`_), and that no
semicolon is needed at the end of a "switch" statement.
.. bro:keyword:: when
Evaluates a given expression, which must result in a value of type
:bro:type:`bool`. When the value of the expression becomes available
and if the result is true, then a specified statement is executed.
In the following example, if the expression evaluates to true, then
the "print" statement is executed::
when ( (local x = foo()) && x == 42 )
print x;
However, if a timeout is specified, and if the expression does not
evaluate to true within the specified timeout interval, then the
statement following the "timeout" keyword is executed::
when ( (local x = foo()) && x == 42 )
print x;
timeout 5sec {
print "timeout";
}
Note that when a timeout is specified the braces are
always required (these do not indicate a `compound statement`_).
The expression in a "when" statement can contain a declaration of a local
variable but only if the declaration is written in the form
"local *var* = *init*" (example: "local x = myfunction()"). This form
of a local declaration is actually an expression, the result of which
is always a boolean true value.
The expression in a "when" statement can contain an asynchronous function
call such as :bro:id:`lookup_hostname` (in fact, this is the only place
such a function can be called), but it can also contain an ordinary
function call. When an asynchronous function call is in the expression,
then Bro will continue processing statements in the script following
the "when" statement, and when the result of the function call is available
Bro will finish evaluating the expression in the "when" statement.
See the :bro:keyword:`return` statement for an explanation of how to
create an asynchronous function in a Bro script.
.. _compound statement:
**compound statement**
A compound statement is created by wrapping zero or more statements in
braces ``{ }``. Individual statements inside the braces need to be
terminated by a semicolon, but a semicolon is not needed at the end
(outside of the braces) of a compound statement.
A compound statement is required in order to execute more than one
statement in the body of a :bro:keyword:`for`, :bro:keyword:`if`, or
:bro:keyword:`when` statement.
Example::
if ( x == 2 ) {
print "x is 2";
++x;
}
Note that there are other places in the Bro scripting language that use
braces, but that do not indicate the presence of a compound
statement (these are noted in the documentation).
.. _null:
**null statement**
The null statement (executing it has no effect) consists of just a
semicolon. This might be useful during testing or debugging a Bro script
in places where a statement is required, but it is probably not useful
otherwise.
Example::
if ( x == 2 )
;
@ -1,106 +1,128 @@
Types
=====
The Bro scripting language supports the following built-in types:
+-----------------------+--------------------+
| Name                  | Description        |
+=======================+====================+
| :bro:type:`bool`      | Boolean            |
+-----------------------+--------------------+
| :bro:type:`count`,    | Numeric types      |
| :bro:type:`int`,      |                    |
| :bro:type:`double`    |                    |
+-----------------------+--------------------+
| :bro:type:`time`,     | Time types         |
| :bro:type:`interval`  |                    |
+-----------------------+--------------------+
| :bro:type:`string`    | String             |
+-----------------------+--------------------+
| :bro:type:`pattern`   | Regular expression |
+-----------------------+--------------------+
| :bro:type:`port`,     | Network types      |
| :bro:type:`addr`,     |                    |
| :bro:type:`subnet`    |                    |
+-----------------------+--------------------+
| :bro:type:`enum`      | Enumeration        |
|                       | (user-defined type)|
+-----------------------+--------------------+
| :bro:type:`table`,    | Container types    |
| :bro:type:`set`,      |                    |
| :bro:type:`vector`,   |                    |
| :bro:type:`record`    |                    |
+-----------------------+--------------------+
| :bro:type:`function`, | Executable types   |
| :bro:type:`event`,    |                    |
| :bro:type:`hook`      |                    |
+-----------------------+--------------------+
| :bro:type:`file`      | File type (only    |
|                       | for writing)       |
+-----------------------+--------------------+
| :bro:type:`opaque`    | Opaque type (for   |
|                       | some built-in      |
|                       | functions)         |
+-----------------------+--------------------+
| :bro:type:`any`       | Any type (for      |
|                       | functions or       |
|                       | containers)        |
+-----------------------+--------------------+
Here is a more detailed description of each type:
.. bro:type:: void
An internal Bro type (i.e., "void" is not a reserved keyword in the Bro
scripting language) representing the absence of a return type for a
function.
.. bro:type:: bool
Reflects a value with one of two meanings: true or false. The two
"bool" constants are ``T`` and ``F``.
The "bool" type supports the following operators: equality/inequality
(``==``, ``!=``), logical and/or (``&&``, ``||``), logical
negation (``!``), and absolute value (where ``|T|`` is 1, and ``|F|`` is 0,
and in both cases the result type is :bro:type:`count`).
.. bro:type:: int
A numeric type representing a 64-bit signed integer. An "int" constant
is a string of digits preceded by a "+" or "-" sign, e.g.
``-42`` or ``+5`` (the "+" sign is optional but see note about type
inferencing below). An "int" constant can also be written in
hexadecimal notation (in which case "0x" must be between the sign and
the hex digits), e.g. ``-0xFF`` or ``+0xabc123``.
The "int" type supports the following operators: arithmetic
operators (``+``, ``-``, ``*``, ``/``, ``%``), comparison operators
(``==``, ``!=``, ``<``, ``<=``, ``>``, ``>=``), assignment operators
(``=``, ``+=``, ``-=``), pre-increment (``++``), pre-decrement
(``--``), unary plus and minus (``+``, ``-``), and absolute value
(e.g., ``|-3|`` is 3, but the result type is :bro:type:`count`).
When using type inferencing use care so that the
intended type is inferred, e.g. "local size_difference = 0" will
infer ":bro:type:`count`", while "local size_difference = +0"
will infer "int".
.. bro:type:: count
A numeric type representing a 64-bit unsigned integer. A "count"
constant is a string of digits, e.g. ``1234`` or ``0``. A "count"
can also be written in hexadecimal notation (in which case "0x" must
precede the hex digits), e.g. ``0xff`` or ``0xABC123``.
The "count" type supports the same operators as the ":bro:type:`int`"
type, but a unary plus or minus applied to a "count" results in an
"int".
.. bro:type:: counter
An alias to :bro:type:`count`.
.. bro:type:: double
A numeric type representing a double-precision floating-point
number. Floating-point constants are written as a string of digits
with an optional decimal point, optional scale-factor in scientific
notation, and optional "+" or "-" sign. Examples are ``-1234``,
``-1234e0``, ``3.14159``, and ``.003E-23``.
The "double" type supports the following operators: arithmetic
operators (``+``, ``-``, ``*``, ``/``), comparison operators
(``==``, ``!=``, ``<``, ``<=``, ``>``, ``>=``), assignment operators
(``=``, ``+=``, ``-=``), unary plus and minus (``+``, ``-``), and
absolute value (e.g., ``|-3.14|`` is 3.14).
When using type inferencing use care so that the
intended type is inferred, e.g. "local size_difference = 5" will
infer ":bro:type:`count`", while "local size_difference = 5.0"
will infer "double".
.. bro:type:: time
A temporal type representing an absolute time. There is currently
no way to specify a ``time`` constant, but one can use the
:bro:id:`double_to_time`, :bro:id:`current_time`, or :bro:id:`network_time`
built-in functions to assign a value to a ``time``-typed variable.
Time values support the comparison operators (``==``, ``!=``, ``<``,
``<=``, ``>``, ``>=``). A ``time`` value can be subtracted from
another ``time`` value to produce an :bro:type:`interval` value. An
``interval`` value can be added to, or subtracted from, a ``time`` value
to produce a ``time`` value. The absolute value of a ``time`` value is
a :bro:type:`double` with the same numeric value.
.. bro:type:: interval
@ -115,52 +137,58 @@ The Bro scripting language supports the following built-in types.
``3.5mins``. An ``interval`` can also be negated, for example
``-12 hr`` represents "twelve hours in the past".
Intervals support addition and subtraction, the comparison operators
(``==``, ``!=``, ``<``, ``<=``, ``>``, ``>=``), the assignment
operators (``=``, ``+=``, ``-=``), and unary plus and minus (``+``, ``-``).
Intervals also support division (in which case the result is a
:bro:type:`double` value). An ``interval`` can be multiplied or divided
by an arithmetic type (``count``, ``int``, or ``double``) to produce
an ``interval`` value. The absolute value of an ``interval`` is a
``double`` value equal to the number of seconds in the ``interval``
(e.g., ``|-1 min|`` is 60.0).
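For example, a brief sketch (the values are illustrative only):
.. code:: bro
local d = 90sec - 30sec;   # an interval of one minute
print d / 30sec;           # dividing two intervals yields a double (2.0)
print 2 * 1min;            # multiplying by an arithmetic type yields an interval
print |-1 min|;            # the number of seconds, as a double (60.0)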
.. bro:type:: string
A type used to hold character-string values which represent text, although
strings in a Bro script can actually contain any arbitrary binary data.
String constants are created by enclosing text within a pair of double
quotes ("). A string constant cannot span multiple lines in a Bro script.
The backslash character (\\) introduces escape sequences. The
following escape sequences are recognized: ``\n``, ``\t``, ``\v``, ``\b``,
``\r``, ``\f``, ``\a``, ``\ooo`` (where each 'o' is an octal digit),
``\xhh`` (where each 'h' is a hexadecimal digit). For escape sequences
that don't match any of these, Bro will just remove the backslash (so
to represent a literal backslash in a string constant, you just use
two consecutive backslashes).
Strings support concatenation (``+``), and assignment (``=``, ``+=``).
Strings also support the comparison operators (``==``, ``!=``, ``<``,
``<=``, ``>``, ``>=``). The number of characters in a string can be
found by enclosing the string within pipe characters (e.g., ``|"abc"|``
is 3). Substring searching can be performed using the "in" or "!in"
operators (e.g., "bar" in "foobar" yields true).
The subscript operator can extract a substring of a string. To do this,
specify the starting index to extract (if the starting index is omitted,
then zero is assumed), followed by a colon and index
one past the last character to extract (if the last index is omitted,
then the extracted substring will go to the end of the original string).
However, if both the colon and last index are omitted, then a string of
length one is extracted. String indexing is zero-based, but an index
of -1 refers to the last character in the string, and -2 refers to the
second-to-last character, etc. Here are a few examples::
local orig = "0123456789";
local second_char = orig[1];
local last_char = orig[-1];
local first_two_chars = orig[:2];
local last_two_chars = orig[8:];
local no_first_and_last = orig[1:9];
Note that the subscript operator cannot be used to modify a string (i.e.,
it cannot be on the left side of an assignment operator).
.. bro:type:: pattern
@ -174,7 +202,7 @@ The Bro scripting language supports the following built-in types.
and embedded.
In exact matching the ``==`` equality relational operator is used
with one "pattern" operand and one ":bro:type:`string`"
operand (order of operands does not matter) to check whether the full
string exactly matches the pattern. In exact matching, the ``^``
beginning-of-line and ``$`` end-of-line anchors are redundant since
@ -190,8 +218,8 @@ The Bro scripting language supports the following built-in types.
yields false. The ``!=`` operator would yield the negation of ``==``.
In embedded matching the ``in`` operator is used with one
"pattern" operand (which must be on the left-hand side) and
one ":bro:type:`string`" operand, but tests whether the pattern
appears anywhere within the given string. For example::
/foo|bar/ in "foobar"
@ -203,27 +231,12 @@ The Bro scripting language supports the following built-in types.
is false since "oob" does not appear at the start of "foobar". The
``!in`` operator would yield the negation of ``in``.
.. bro:type:: port
A type representing transport-level port numbers (besides TCP and
UDP ports, there is a concept of an ICMP "port" where the source
port is the ICMP message type and the destination port the ICMP
message code). A ``port`` constant is written as an unsigned integer
followed by one of ``/tcp``, ``/udp``, ``/icmp``, or ``/unknown``.
Ports support the comparison operators (``==``, ``!=``, ``<``, ``<=``,
@ -255,14 +268,6 @@ The Bro scripting language supports the following built-in types.
address) are treated internally as IPv4 addresses (for example,
``[::ffff:192.168.1.100]`` is equal to ``192.168.1.100``).
Addresses can be compared for equality (``==``, ``!=``),
and also for ordering (``<``, ``<=``, ``>``, ``>=``). The absolute value
of an address gives the size in bits (32 for IPv4, and 128 for IPv6).
@ -285,9 +290,17 @@ The Bro scripting language supports the following built-in types.
if ( a in s )
print "true";
You can check if a given ``addr`` is IPv4 or IPv6 using
the :bro:id:`is_v4_addr` and :bro:id:`is_v6_addr` built-in functions.
Note that hostname constants can also be used, but since a hostname can
correspond to multiple IP addresses, the type of such a variable is
"set[addr]". For example:
.. code:: bro
local a = www.google.com;
.. bro:type:: subnet
A type representing a block of IP addresses in CIDR notation. A
@ -296,13 +309,24 @@ The Bro scripting language supports the following built-in types.
number. For example, ``192.168.0.0/16`` or ``[fe80::]/64``.
Subnets can be compared for equality (``==``, ``!=``). An
"addr" can be checked for inclusion in a subnet using
the ``in`` or ``!in`` operators.
.. bro:type:: enum
A type allowing the specification of a set of related values that
have no further structure. An example declaration:
.. code:: bro
type color: enum { Red, White, Blue, };
The last comma after ``Blue`` is optional. Both the type name ``color``
and the individual values (``Red``, etc.) have global scope.
Enumerations do not have associated values or ordering.
The only operations allowed on enumerations are equality comparisons
(``==``, ``!=``) and assignment (``=``).
.. bro:type:: table
@ -316,24 +340,25 @@ The Bro scripting language supports the following built-in types.
table [ type^+ ] of type
where *type^+* is one or more types, separated by commas.
For example:
.. code:: bro
global a: table[count] of string;
declares a table indexed by "count" values and yielding
"string" values. The yield type can also be more complex:
.. code:: bro
global a: table[count] of table[addr, port] of string;
which declares a table indexed by "count" and yielding
another "table" which is indexed by an "addr"
and "port" to yield a "string".
One way to initialize a table is by enclosing a set of initializers within
braces, for example:
.. code:: bro
@ -343,18 +368,17 @@ The Bro scripting language supports the following built-in types.
[5] = "five", [5] = "five",
}; };
A table constructor (equivalent to above example) can also be used A table constructor can also be used to create a table:
to create a table:
.. code:: bro .. code:: bro
global t2: table[count] of string = table( global t2 = table(
[11] = "eleven", [192.168.0.2, 22/tcp] = "ssh",
[5] = "five" [192.168.0.3, 80/tcp] = "http"
); );
Table constructors can also be explicitly named by a type, which is Table constructors can also be explicitly named by a type, which is
useful for when a more complex index type could otherwise be useful when a more complex index type could otherwise be
ambiguous: ambiguous:
.. code:: bro .. code:: bro
@ -381,17 +405,7 @@ The Bro scripting language supports the following built-in types.
if ( 13 in t )
...
if ( [192.168.0.2, 22/tcp] in t2 )
...
Add or overwrite individual table elements by assignment:
@ -400,7 +414,7 @@ The Bro scripting language supports the following built-in types.
t[13] = "thirteen"; t[13] = "thirteen";
Remove individual table elements with ``delete``: Remove individual table elements with :bro:keyword:`delete`:
.. code:: bro .. code:: bro
@ -416,6 +430,9 @@ The Bro scripting language supports the following built-in types.
|t|
See the :bro:keyword:`for` statement for info on how to iterate over
the elements in a table.
.. bro:type:: set
A set is like a :bro:type:`table`, but it is a collection of indices
@ -426,25 +443,22 @@ The Bro scripting language supports the following built-in types.
where *type^+* is one or more types separated by commas.
Sets can be initialized by listing elements enclosed by curly braces:
.. code:: bro
global s: set[port] = { 21/tcp, 23/tcp, 80/tcp, 443/tcp };
global s2: set[port, string] = { [21/tcp, "ftp"], [23/tcp, "telnet"] };
The types are explicitly shown in the example above, but they could
have been left to type inference.
A set constructor (equivalent to above example) can also be used to
create a set:
.. code:: bro
global s3 = set(21/tcp, 23/tcp, 80/tcp, 443/tcp);
Set constructors can also be explicitly named by a type, which is
useful when a more complex index type could otherwise be
ambiguous:
.. code:: bro
@ -465,18 +479,10 @@ The Bro scripting language supports the following built-in types.
if ( 21/tcp in s )
...
if ( [21/tcp, "ftp"] !in s2 )
...
Elements are added with :bro:keyword:`add`:
.. code:: bro
@ -485,7 +491,7 @@ The Bro scripting language supports the following built-in types.
Nothing happens if the element with value ``22/tcp`` was already present in
the set.
And removed with :bro:keyword:`delete`:
.. code:: bro
@ -501,6 +507,9 @@ The Bro scripting language supports the following built-in types.
|s|
See the :bro:keyword:`for` statement for info on how to iterate over
the elements in a set.
.. bro:type:: vector
A vector is like a :bro:type:`table`, except it's always indexed by a
@ -515,7 +524,7 @@ The Bro scripting language supports the following built-in types.
.. code:: bro
local v = vector("one", "two", "three");
Vector constructors can also be explicitly named by a type, which
is useful for when a more complex yield type could otherwise be
@ -539,14 +548,6 @@ The Bro scripting language supports the following built-in types.
print v[2];
An element can be added to a vector by assigning the value (a value
that already exists at that index will be overwritten):
@ -577,11 +578,17 @@ The Bro scripting language supports the following built-in types.
The resulting vector of bool is the logical "and" (or logical "or") of
each element of the operand vectors.
See the :bro:keyword:`for` statement for info on how to iterate over
the elements in a vector.
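For illustration, a minimal sketch of such a loop (note that the loop variable is the index, not the element; the names here are our own):

.. code:: bro

    local v = vector("one", "two", "three");

    for ( i in v )
        print v[i];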
.. bro:type:: record .. bro:type:: record
A ``record`` is a collection of values. Each value has a field name A "record" is a collection of values. Each value has a field name
and a type. Values do not need to have the same type and the types and a type. Values do not need to have the same type and the types
have no restrictions. An example record type definition: have no restrictions. Field names must follow the same syntax as
regular variable names (except that field names are allowed to be the
same as local or global variables). An example record type
definition:
.. code:: bro .. code:: bro
@ -590,85 +597,44 @@ The Bro scripting language supports the following built-in types.
s: string &optional; s: string &optional;
}; };
Access to a record field uses the dollar sign (``$``) operator: Records can be initialized or assigned as a whole in three different ways.
.. code:: bro
global r: MyRecordType;
r$c = 13;
Record assignment can be done field by field or as a whole like:
.. code:: bro
r = [$c = 13, $s = "thirteen"];
When assigning a whole record value, all fields that are not When assigning a whole record value, all fields that are not
:bro:attr:`&optional` or have a :bro:attr:`&default` attribute must :bro:attr:`&optional` or have a :bro:attr:`&default` attribute must
be specified. be specified. First, there's a constructor syntax:
To test for existence of a field that is :bro:attr:`&optional`, use the
``?$`` operator:
.. code:: bro .. code:: bro
if ( r?$s ) local r: MyRecordType = record($c = 7);
...
Records can also be created using a constructor syntax:
.. code:: bro
global r2: MyRecordType = record($c = 7);
And the constructor can be explicitly named by type, too, which And the constructor can be explicitly named by type, too, which
is arguably more readable code: is arguably more readable:
.. code:: bro .. code:: bro
global r3 = MyRecordType($c = 42); local r = MyRecordType($c = 42);
.. bro:type:: opaque And the third way is like this:
A data type whose actual representation/implementation is
intentionally hidden, but whose values may be passed to certain
functions that can actually access the internal/hidden resources.
Opaque types are differentiated from each other by qualifying them
like ``opaque of md5`` or ``opaque of sha1``. Any valid identifier
can be used as the type qualifier.
An example use of this type is the set of built-in functions which
perform hashing:
.. code:: bro .. code:: bro
local handle: opaque of md5 = md5_hash_init(); local r: MyRecordType = [$c = 13, $s = "thirteen"];
md5_hash_update(handle, "test");
md5_hash_update(handle, "testing");
print md5_hash_finish(handle);
Here the opaque type is used to provide a handle to a particular Access to a record field uses the dollar sign (``$``) operator, and
resource which is calculating an MD5 checksum incrementally over record fields can be assigned with this:
time, but the details of that resource aren't relevant, it's only
necessary to have a handle as a way of identifying it and
distinguishing it from other such resources.
.. bro:type:: file
Bro supports writing to files, but not reading from them. Files
can be opened using either the :bro:id:`open` or :bro:id:`open_for_append`
built-in functions, and closed using the :bro:id:`close` built-in
function. For example, declare, open, and write to a file
and finally close it like:
.. code:: bro .. code:: bro
global f: file = open("myfile"); local r: MyRecordType;
print f, "hello, world"; r$c = 13;
close(f);
Writing to files like this for logging usually isn't recommended, for better To test if a field that is :bro:attr:`&optional` has been assigned a
logging support see :doc:`/frameworks/logging`. value, use the ``?$`` operator (it returns a :bro:type:`bool` value of
``T`` if the field has been assigned a value, or ``F`` if not):
.. code:: bro
if ( r ?$ s )
...
.. bro:type:: function .. bro:type:: function
@ -700,6 +666,16 @@ The Bro scripting language supports the following built-in types.
type, but when it is, the return type and argument list (including the type, but when it is, the return type and argument list (including the
name of each argument) must match exactly. name of each argument) must match exactly.
Here is an example function that takes no parameters and does not
return a value:
.. code:: bro
function my_func()
{
print "my_func";
}
Function types don't need to have a name and can be assigned anonymously: Function types don't need to have a name and can be assigned anonymously:
.. code:: bro .. code:: bro
@ -742,9 +718,20 @@ The Bro scripting language supports the following built-in types.
Event handlers are nearly identical in both syntax and semantics to Event handlers are nearly identical in both syntax and semantics to
a :bro:type:`function`, with the two differences being that event a :bro:type:`function`, with the two differences being that event
handlers have no return type since they never return a value, and handlers have no return type since they never return a value, and
you cannot call an event handler. Instead of directly calling an you cannot call an event handler.
event handler from a script, event handler bodies are executed when
they are invoked by one of three different methods: Example:
.. code:: bro
event my_event(r: bool, s: string)
{
print "my_event", r, s;
}
Instead of directly calling an event handler from a script, event
handler bodies are executed when they are invoked by one of three
different methods:
- From the event engine - From the event engine
@ -765,7 +752,7 @@ The Bro scripting language supports the following built-in types.
This assumes that ``password_exposed`` was previously declared This assumes that ``password_exposed`` was previously declared
as an event handler type with compatible arguments. as an event handler type with compatible arguments.
- Via the ``schedule`` expression in a script - Via the :bro:keyword:`schedule` expression in a script
This delays the invocation of event handlers until some time in This delays the invocation of event handlers until some time in
the future. For example: the future. For example:
@ -789,8 +776,8 @@ The Bro scripting language supports the following built-in types.
immediate and they do not get scheduled through an event queue. immediate and they do not get scheduled through an event queue.
Also, a unique feature of a hook is that a given hook handler body Also, a unique feature of a hook is that a given hook handler body
can short-circuit the execution of remaining hook handlers simply by can short-circuit the execution of remaining hook handlers simply by
exiting from the body as a result of a ``break`` statement (as exiting from the body as a result of a :bro:keyword:`break` statement (as
opposed to a ``return`` or just reaching the end of the body). opposed to a :bro:keyword:`return` or just reaching the end of the body).
A hook type is declared like:: A hook type is declared like::
@ -859,142 +846,60 @@ The Bro scripting language supports the following built-in types.
executed due to one handler body exiting as a result of a ``break`` executed due to one handler body exiting as a result of a ``break``
statement. statement.
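As a rough sketch of that short-circuit behavior (the hook name and handler body below are invented for illustration, not taken from an actual script; the hook would be invoked elsewhere with ``hook my_hook(0)``):

.. code:: bro

    global my_hook: hook(i: count);

    hook my_hook(i: count)
        {
        if ( i == 0 )
            # Any remaining my_hook handler bodies are skipped.
            break;

        print "handled", i;
        }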
Attributes .. bro:type:: file
----------
Attributes occur at the end of type/event declarations and change their Bro supports writing to files, but not reading from them (to read from
behavior. The syntax is ``&key`` or ``&key=val``, e.g., ``type T: files see the :doc:`/frameworks/input`). Files
set[count] &read_expire=5min`` or ``event foo() &priority=-3``. The Bro can be opened using either the :bro:id:`open` or :bro:id:`open_for_append`
scripting language supports the following built-in attributes. built-in functions, and closed using the :bro:id:`close` built-in
function. For example, declare, open, and write to a file and finally
close it like:
.. bro:attr:: &optional .. code:: bro
Allows a record field to be missing. For example the type ``record { local f = open("myfile");
a: addr; b: port &optional; }`` could be instantiated both as print f, "hello, world";
singleton ``[$a=127.0.0.1]`` or pair ``[$a=127.0.0.1, $b=80/tcp]``. close(f);
.. bro:attr:: &default Writing to files like this for logging usually isn't recommended, for better
logging support see :doc:`/frameworks/logging`.
Uses a default value for a record field, a function/hook/event .. bro:type:: opaque
parameter, or container elements. For example, ``table[int] of
string &default="foo"`` would create a table that returns the
:bro:type:`string` ``"foo"`` for any non-existing index.
.. bro:attr:: &redef A data type whose actual representation/implementation is
intentionally hidden, but whose values may be passed to certain
built-in functions that can actually access the internal/hidden resources.
Opaque types are differentiated from each other by qualifying them
like "opaque of md5" or "opaque of sha1".
Allows for redefinition of initial object values. This is typically An example use of this type is the set of built-in functions which
used with constants, for example, ``const clever = T &redef;`` would perform hashing:
allow the constant to be redefined at some later point during script
execution.
.. bro:attr:: &rotate_interval .. code:: bro
Rotates a file after a specified interval. local handle = md5_hash_init();
md5_hash_update(handle, "test");
md5_hash_update(handle, "testing");
print md5_hash_finish(handle);
.. bro:attr:: &rotate_size Here the opaque type is used to provide a handle to a particular
resource which is calculating an MD5 hash incrementally over
time, but the details of that resource aren't relevant, it's only
necessary to have a handle as a way of identifying it and
distinguishing it from other such resources.
Rotates a file after it has reached a given size in bytes. .. bro:type:: any
.. bro:attr:: &add_func Used to bypass strong typing. For example, a function can take an
argument of type ``any`` when it may be of different types.
The only operation allowed on a variable of type ``any`` is assignment.
Can be applied to an identifier with &redef to specify a function to Note that users aren't expected to use this type. It's provided mainly
be called any time a "redef <id> += ..." declaration is parsed. The for use by some built-in functions and scripts included with Bro.
function takes two arguments of the same type as the identifier, the first
being the old value of the variable and the second being the new
value given after the "+=" operator in the "redef" declaration. The
return value of the function will be the actual new value of the
variable after the "redef" declaration is parsed.
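As a minimal, hypothetical sketch of how ``any`` might appear in a user-defined function (the function and global below are our own illustration, not built-ins):

.. code:: bro

    global last_value: any;

    function remember(x: any)
        {
        # Assignment is the only operation permitted on an "any" value.
        last_value = x;
        }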
.. bro:attr:: &delete_func .. bro:type:: void
Same as &add_func, except for "redef" declarations that use the "-=" An internal Bro type (i.e., "void" is not a reserved keyword in the Bro
operator. scripting language) representing the absence of a return type for a
function.
.. bro:attr:: &expire_func
Called right before a container element expires. The function's
first parameter is of the same type of the container and the second
parameter the same type of the container's index. The return
value is an :bro:type:`interval` indicating the amount of additional
time to wait before expiring the container element at the given
index (which will trigger another execution of this function).
.. bro:attr:: &read_expire
Specifies a read expiration timeout for container elements. That is,
the element expires after the given amount of time since the last
time it has been read. Note that a write also counts as a read.
.. bro:attr:: &write_expire
Specifies a write expiration timeout for container elements. That
is, the element expires after the given amount of time since the
last time it has been written.
.. bro:attr:: &create_expire
Specifies a creation expiration timeout for container elements. That
is, the element expires after the given amount of time since it has
been inserted into the container, regardless of any reads or writes.
.. bro:attr:: &persistent
Makes a variable persistent, i.e., its value is written to disk (per
default at shutdown time).
.. bro:attr:: &synchronized
Synchronizes variable accesses across nodes. The value of a
``&synchronized`` variable is automatically propagated to all peers
when it changes.
.. bro:attr:: &encrypt
Encrypts files right before writing them to disk.
.. TODO: needs to be documented in more detail.
.. bro:attr:: &raw_output
Opens a file in raw mode, i.e., non-ASCII characters are not
escaped.
.. bro:attr:: &mergeable
Prefers set union to assignment for synchronized state. This
attribute is used in conjunction with :bro:attr:`&synchronized`
container types: when the same container is updated at two peers
with different value, the propagation of the state causes a race
condition, where the last update succeeds. This can cause
inconsistencies and can be avoided by unifying the two sets, rather
than merely overwriting the old value.
.. bro:attr:: &priority
Specifies the execution priority (as a signed integer) of a hook or
event handler. Higher values are executed before lower ones. The
default value is 0.
.. bro:attr:: &group
Groups event handlers such that those in the same group can be
jointly activated or deactivated.
.. bro:attr:: &log
Writes a record field to the associated log stream.
.. bro:attr:: &error_handler
Internally set on the events that are associated with the reporter
framework: :bro:id:`reporter_info`, :bro:id:`reporter_warning`, and
:bro:id:`reporter_error`. It prevents any handlers of those events
from being able to generate reporter messages that go through any of
those events (i.e., it prevents an infinite event recursion). Instead,
such nested reporter messages are output to stderr.
.. bro:attr:: &type_column
Used by the input framework. It can be used on columns of type
:bro:type:`port` and specifies the name of an additional column in
the input file which specifies the protocol of the port (tcp/udp/icmp).
View file
@ -1,13 +1,19 @@
event bro_init() event bro_init()
{ {
# Declaration of the table.
local ssl_services: table[string] of port; local ssl_services: table[string] of port;
# Initialize the table.
ssl_services = table(["SSH"] = 22/tcp, ["HTTPS"] = 443/tcp); ssl_services = table(["SSH"] = 22/tcp, ["HTTPS"] = 443/tcp);
# Insert one key-yield pair into the table.
ssl_services["IMAPS"] = 993/tcp; ssl_services["IMAPS"] = 993/tcp;
# Check if the key "SMTPS" is not in the table.
if ( "SMTPS" !in ssl_services ) if ( "SMTPS" !in ssl_services )
ssl_services["SMTPS"] = 587/tcp; ssl_services["SMTPS"] = 587/tcp;
# Iterate over each key in the table.
for ( k in ssl_services ) for ( k in ssl_services )
print fmt("Service Name: %s - Common Port: %s", k, ssl_services[k]); print fmt("Service Name: %s - Common Port: %s", k, ssl_services[k]);
} }
View file
@ -10,6 +10,6 @@ event bro_init()
print fmt("contents of v1: %s", v1); print fmt("contents of v1: %s", v1);
print fmt("length of v1: %d", |v1|); print fmt("length of v1: %d", |v1|);
print fmt("contents of v1: %s", v2); print fmt("contents of v2: %s", v2);
print fmt("length of v2: %d", |v2|); print fmt("length of v2: %d", |v2|);
} }

View file
event bro_init() event bro_init()
{ {
local test_string = "The quick brown fox jumped over the lazy dog."; local test_string = "The quick brown fox jumps over the lazy dog.";
local test_pattern = /quick|lazy/; local test_pattern = /quick|lazy/;
if ( test_pattern in test_string ) if ( test_pattern in test_string )
View file
@ -1,8 +1,10 @@
module Factor; module Factor;
export { export {
# Append the value LOG to the Log::ID enumerable.
redef enum Log::ID += { LOG }; redef enum Log::ID += { LOG };
# Define a new type called Factor::Info.
type Info: record { type Info: record {
num: count &log; num: count &log;
factorial_num: count &log; factorial_num: count &log;
@ -20,6 +22,7 @@ function factorial(n: count): count
event bro_init() event bro_init()
{ {
# Create the logging stream.
Log::create_stream(LOG, [$columns=Info]); Log::create_stream(LOG, [$columns=Info]);
} }
View file
@ -2,7 +2,6 @@
@load base/protocols/ssh/ @load base/protocols/ssh/
redef Notice::emailed_types += { redef Notice::emailed_types += {
SSH::Interesting_Hostname_Login, SSH::Interesting_Hostname_Login
SSH::Login
}; };
View file
@ -3,5 +3,4 @@
redef Notice::type_suppression_intervals += { redef Notice::type_suppression_intervals += {
[SSH::Interesting_Hostname_Login] = 1day, [SSH::Interesting_Hostname_Login] = 1day,
[SSH::Login] = 12hrs,
}; };
View file
@ -54,7 +54,8 @@ script and much more in following sections.
.. btest-include:: ${BRO_SRC_ROOT}/scripts/policy/frameworks/files/detect-MHR.bro .. btest-include:: ${BRO_SRC_ROOT}/scripts/policy/frameworks/files/detect-MHR.bro
:lines: 4-6 :lines: 4-6
Lines 3 to 5 of the script process the ``__load__.bro`` script in the The first part of the script consists of ``@load`` directives which
process the ``__load__.bro`` script in the
respective directories being loaded. The ``@load`` directives are respective directories being loaded. The ``@load`` directives are
often considered good practice or even just good manners when writing often considered good practice or even just good manners when writing
Bro scripts to make sure they can be used on their own. While it's unlikely that in a Bro scripts to make sure they can be used on their own. While it's unlikely that in a
@ -78,29 +79,37 @@ of the :bro:id:`NOTICE` function to generate notices of type
``TeamCymruMalwareHashRegistry::Match`` as done in the next section. Notices ``TeamCymruMalwareHashRegistry::Match`` as done in the next section. Notices
allow Bro to generate some kind of extra notification beyond its allow Bro to generate some kind of extra notification beyond its
default log types. Often times, this extra notification comes in the default log types. Often times, this extra notification comes in the
form of an email generated and sent to a preconfigured address, but can be altered form of an email generated and sent to a preconfigured address, but can
depending on the needs of the deployment. The export section is finished off with be altered depending on the needs of the deployment. The export section
the definition of two constants that list the kind of files we want to match against and is finished off with the definition of a few constants that list the kind
the minimum percentage of detection threshold in which we are interested. of files we want to match against and the minimum percentage of
detection threshold in which we are interested.
Up until this point, the script has merely done some basic setup. With the next section, Up until this point, the script has merely done some basic setup. With
the script starts to define instructions to take in a given event. the next section, the script starts to define instructions to take in
a given event.
.. btest-include:: ${BRO_SRC_ROOT}/scripts/policy/frameworks/files/detect-MHR.bro .. btest-include:: ${BRO_SRC_ROOT}/scripts/policy/frameworks/files/detect-MHR.bro
:lines: 38-62 :lines: 38-71
The workhorse of the script is contained in the event handler for The workhorse of the script is contained in the event handler for
``file_hash``. The :bro:see:`file_hash` event allows scripts to access ``file_hash``. The :bro:see:`file_hash` event allows scripts to access
the information associated with a file for which Bro's file analysis framework has the information associated with a file for which Bro's file analysis
generated a hash. The event handler is passed the file itself as ``f``, the type of digest framework has generated a hash. The event handler is passed the
algorithm used as ``kind`` and the hash generated as ``hash``. file itself as ``f``, the type of digest algorithm used as ``kind``
and the hash generated as ``hash``.
On line 3, an ``if`` statement is used to check for the correct type of hash, in this case In the ``file_hash`` event handler, there is an ``if`` statement that is used
a SHA1 hash. It also checks for a mime type we've defined as being of interest as defined in the to check for the correct type of hash, in this case
constant ``match_file_types``. The comparison is made against the expression ``f$mime_type``, which uses a SHA1 hash. It also checks for a mime type we've defined as
the ``$`` dereference operator to check the value ``mime_type`` inside the variable ``f``. Once both being of interest as defined in the constant ``match_file_types``.
values resolve to true, a local variable is defined to hold a string comprised of the SHA1 hash concatenated The comparison is made against the expression ``f$mime_type``, which uses
with ``.malware.hash.cymru.com``; this value will be the domain queried in the malware hash registry. the ``$`` dereference operator to check the value ``mime_type``
inside the variable ``f``. If the entire expression evaluates to true,
then a helper function is called to do the rest of the work. In that
function, a local variable is defined to hold a string comprised of
the SHA1 hash concatenated with ``.malware.hash.cymru.com``; this
value will be the domain queried in the malware hash registry.
The rest of the script is contained within a ``when`` block. In The rest of the script is contained within a ``when`` block. In
short, a ``when`` block is used when Bro needs to perform asynchronous short, a ``when`` block is used when Bro needs to perform asynchronous
@ -111,24 +120,28 @@ this event continues and upon receipt of the values returned by
:bro:id:`lookup_hostname_txt`, the ``when`` block is executed. The :bro:id:`lookup_hostname_txt`, the ``when`` block is executed. The
``when`` block splits the string returned into a portion for the date on which ``when`` block splits the string returned into a portion for the date on which
the malware was first detected and the detection rate by splitting on a text space the malware was first detected and the detection rate by splitting on a text space
and storing the values returned in a local table variable. In line 12, if the table and storing the values returned in a local table variable.
returned by ``split1`` has two entries, indicating a successful split, we store the detection In the ``do_mhr_lookup`` function, if the table
date in ``mhr_first_detected`` and the rate in ``mhr_detect_rate`` on lines 14 and 15 respectively returned by ``split1`` has two entries, indicating a successful split, we
store the detection
date in ``mhr_first_detected`` and the rate in ``mhr_detect_rate``
using the appropriate conversion functions. From this point on, Bro knows it has seen a file using the appropriate conversion functions. From this point on, Bro knows it has seen a file
transmitted which has a hash that has been seen by the Team Cymru Malware Hash Registry; the rest transmitted which has a hash that has been seen by the Team Cymru Malware Hash Registry; the rest
of the script is dedicated to producing a notice. of the script is dedicated to producing a notice.
On line 17, the detection time is processed into a string representation and stored in The detection time is processed into a string representation and stored in
``readable_first_detected``. The script then compares the detection rate against the ``readable_first_detected``. The script then compares the detection rate
``notice_threshold`` that was defined earlier. If the detection rate is high enough, the script against the ``notice_threshold`` that was defined earlier. If the
creates a concise description of the notice on line 22, a possible URL to check the sample against detection rate is high enough, the script creates a concise description
``virustotal.com``'s database, and makes the call to :bro:id:`NOTICE` to hand the relevant information of the notice and stores it in the ``message`` variable. It also
off to the Notice framework. creates a possible URL to check the sample against
``virustotal.com``'s database, and makes the call to :bro:id:`NOTICE`
to hand the relevant information off to the Notice framework.
In approximately 25 lines of code, Bro provides an amazing In a few dozen lines of code, Bro provides an amazing
utility that would be incredibly difficult to implement and deploy utility that would be incredibly difficult to implement and deploy
with other products. In truth, claiming that Bro does this in 25 with other products. In truth, claiming that Bro does this in such a small
lines is a misdirection; there is a truly massive number of things number of lines is a misdirection; there is a truly massive number of things
going on behind-the-scenes in Bro, but it is the inclusion of the going on behind-the-scenes in Bro, but it is the inclusion of the
scripting language that gives analysts access to those underlying scripting language that gives analysts access to those underlying
layers in a succinct and well defined manner. layers in a succinct and well defined manner.
@ -180,7 +193,7 @@ event definition used by Bro. As Bro detects DNS requests being
issued by an originator, it issues this event and any number of issued by an originator, it issues this event and any number of
scripts then have access to the data Bro passes along with the event. scripts then have access to the data Bro passes along with the event.
In this example, Bro passes not only the message, the query, query In this example, Bro passes not only the message, the query, query
type and query class for the DNS request, but also a then record used type and query class for the DNS request, but also a record used
for the connection itself. for the connection itself.
The Connection Record Data Type The Connection Record Data Type
@ -210,8 +223,7 @@ into the connection record data type will be
:bro:id:`connection_state_remove` . As detailed in the in-line :bro:id:`connection_state_remove` . As detailed in the in-line
documentation, Bro generates this event just before it decides to documentation, Bro generates this event just before it decides to
remove this event from memory, effectively forgetting about it. Let's remove this event from memory, effectively forgetting about it. Let's
take a look at a simple script, stored as take a look at a simple example script, that will output the connection record
``connection_record_01.bro``, that will output the connection record
for a single connection. for a single connection.
.. btest-include:: ${DOC_ROOT}/scripting/connection_record_01.bro .. btest-include:: ${DOC_ROOT}/scripting/connection_record_01.bro
@ -243,12 +255,12 @@ of reference for accessing data in a script.
Bro makes extensive use of nested data structures to store state and Bro makes extensive use of nested data structures to store state and
information gleaned from the analysis of a connection as a complete information gleaned from the analysis of a connection as a complete
unit. To break down this collection of information, you will have to unit. To break down this collection of information, you will have to
make use of use Bro's field delimiter ``$``. For example, the make use of Bro's field delimiter ``$``. For example, the
originating host is referenced by ``c$id$orig_h`` which if given a originating host is referenced by ``c$id$orig_h`` which if given a
narrative relates to ``orig_h`` which is a member of ``id`` which is narrative relates to ``orig_h`` which is a member of ``id`` which is
a member of the data structure referred to as ``c`` that was passed a member of the data structure referred to as ``c`` that was passed
into the event handler." Given that the responder port into the event handler. Given that the responder port
(``c$id$resp_p``) is ``53/tcp``, it's likely that Bro's base HTTP scripts ``c$id$resp_p`` is ``80/tcp``, it's likely that Bro's base HTTP scripts
can further populate the connection record. Let's load the can further populate the connection record. Let's load the
``base/protocols/http`` scripts and check the output of our script. ``base/protocols/http`` scripts and check the output of our script.
@ -276,7 +288,7 @@ As mentioned above, including the appropriate ``@load`` statements is
not only good practice, but can also help to indicate which not only good practice, but can also help to indicate which
functionalities are being used in a script. Take a second to run the functionalities are being used in a script. Take a second to run the
script without the ``-b`` flag and check the output when all of Bro's script without the ``-b`` flag and check the output when all of Bro's
functionality is applied to the tracefile. functionality is applied to the trace file.
Data Types and Data Structures Data Types and Data Structures
============================== ==============================
@ -384,9 +396,12 @@ which it was declared. Local variables tend to be used for values
that are only needed within a specific scope and once the processing that are only needed within a specific scope and once the processing
of a script passes beyond that scope and the variable is no longer used, it of a script passes beyond that scope and the variable is no longer used, it
is deleted. Bro maintains names of locals separately from globally is deleted. Bro maintains names of locals separately from globally
visible ones, an example of which is illustrated below. The script visible ones, an example of which is illustrated below.
executes the event handler :bro:id:`bro_init` which in turn calls the
function ``add_two(i: count)`` with an argument of ``10``. Once Bro .. btest-include:: ${DOC_ROOT}/scripting/data_type_local.bro
The script executes the event handler :bro:id:`bro_init` which in turn calls
the function ``add_two(i: count)`` with an argument of ``10``. Once Bro
enters the ``add_two`` function, it provisions a locally scoped enters the ``add_two`` function, it provisions a locally scoped
variable called ``added_two`` to hold the value of ``i+2``, in this variable called ``added_two`` to hold the value of ``i+2``, in this
case, ``12``. The ``add_two`` function then prints the value of the case, ``12``. The ``add_two`` function then prints the value of the
@ -398,8 +413,6 @@ processing the ``bro_init`` function, the variable called ``test`` is
no longer in scope and, since there exist no other references to the no longer in scope and, since there exist no other references to the
value ``12``, the value is also deleted. value ``12``, the value is also deleted.
.. btest-include:: ${DOC_ROOT}/scripting/data_type_local.bro
Data Structures Data Structures
--------------- ---------------
@ -506,20 +519,7 @@ Tables
A table in Bro is a mapping of a key to a value or yield. While the A table in Bro is a mapping of a key to a value or yield. While the
values don't have to be unique, each key in the table must be unique values don't have to be unique, each key in the table must be unique
to preserve a one-to-one mapping of keys to values. In the example to preserve a one-to-one mapping of keys to values.
below, we've compiled a table of SSL-enabled services and their common
ports. The explicit declaration and constructor for the table on
lines 5 and 7 lay out the data types of the keys (strings) and the
data types of the yields (ports) and then fill in some sample key and
yield pairs. Line 8 shows how to use a table accessor to insert one
key-yield pair into the table. When using the ``in`` operator on a table,
you are effectively working with the keys of the table. In the case
of an ``if`` statement, the ``in`` operator will check for membership among
the set of keys and return a true or false value. As seen on line 10,
we are checking if ``SMTPS`` is not in the set of keys for the
ssl_services table and if the condition holds true, we add the
key-yield pair to the table. Line 13 shows the use of a ``for`` statement
to iterate over each key currently in the table.
.. btest-include:: ${DOC_ROOT}/scripting/data_struct_table_declaration.bro .. btest-include:: ${DOC_ROOT}/scripting/data_struct_table_declaration.bro
@ -527,6 +527,21 @@ to iterate over each key currently in the table.
@TEST-EXEC: btest-rst-cmd bro ${DOC_ROOT}/scripting/data_struct_table_declaration.bro @TEST-EXEC: btest-rst-cmd bro ${DOC_ROOT}/scripting/data_struct_table_declaration.bro
In this example,
we've compiled a table of SSL-enabled services and their common
ports. The explicit declaration and constructor for the table are on
two different lines and lay out the data types of the keys (strings) and the
data types of the yields (ports) and then fill in some sample key and
yield pairs. You can also use a table accessor to insert one
key-yield pair into the table. When using the ``in``
operator on a table, you are effectively working with the keys of the table.
In the case of an ``if`` statement, the ``in`` operator will check for
membership among the set of keys and return a true or false value.
The example shows how to check if ``SMTPS`` is not in the set
of keys for the ``ssl_services`` table and if the condition holds true,
we add the key-yield pair to the table. Finally, the example shows how
to use a ``for`` statement to iterate over each key currently in the table.
Simple examples aside, tables can become extremely complex as the keys Simple examples aside, tables can become extremely complex as the keys
and values for the table become more intricate. Tables can have keys and values for the table become more intricate. Tables can have keys
comprised of multiple data types and even a series of elements called comprised of multiple data types and even a series of elements called
@ -535,9 +550,15 @@ Bro implies a cost in complexity for the person writing the scripts
but pays off in effectiveness given the power of Bro as a network but pays off in effectiveness given the power of Bro as a network
security platform. security platform.
The script below shows a sample table of strings indexed by two .. btest-include:: ${DOC_ROOT}/scripting/data_struct_table_complex.bro
.. btest:: data_struct_table_complex
@TEST-EXEC: btest-rst-cmd bro -b ${DOC_ROOT}/scripting/data_struct_table_complex.bro
This script shows a sample table of strings indexed by two
strings, a count, and a final string. With a tuple acting as an strings, a count, and a final string. With a tuple acting as an
aggregate key, the order is the important as a change in order would aggregate key, the order is important as a change in order would
result in a new key. Here, we're using the table to track the result in a new key. Here, we're using the table to track the
director, studio, year or release, and lead actor in a series of director, studio, year or release, and lead actor in a series of
samurai flicks. It's important to note that in the case of the ``for`` samurai flicks. It's important to note that in the case of the ``for``
@ -546,14 +567,9 @@ iterate over, say, the directors; we have to iterate with the exact
format as the keys themselves. In this case, we need squared brackets format as the keys themselves. In this case, we need squared brackets
surrounding four temporary variables to act as a collection for our surrounding four temporary variables to act as a collection for our
iteration. While this is a contrived example, we could easily have iteration. While this is a contrived example, we could easily have
had keys containing IP addresses (``addr``), ports (``port``) and even a ``string`` had keys containing IP addresses (``addr``), ports (``port``) and even
calculated as the result of a reverse hostname lookup. a ``string`` calculated as the result of a reverse hostname lookup.
.. btest-include:: ${DOC_ROOT}/scripting/data_struct_table_complex.bro
.. btest:: data_struct_table_complex
@TEST-EXEC: btest-rst-cmd bro -b ${DOC_ROOT}/scripting/data_struct_table_complex.bro
Vectors Vectors
~~~~~~~ ~~~~~~~
@ -657,7 +673,7 @@ using a 20 bit subnet mask.
Because this is a script that doesn't use any kind of network Because this is a script that doesn't use any kind of network
analysis, we can handle the event :bro:id:`bro_init` which is always analysis, we can handle the event :bro:id:`bro_init` which is always
generated by Bro's core upon startup. On lines six and seven, two generated by Bro's core upon startup. In the example script, two
locally scoped vectors are created to hold our lists of subnets and IP locally scoped vectors are created to hold our lists of subnets and IP
addresses respectively. Then, using a set of nested ``for`` loops, we addresses respectively. Then, using a set of nested ``for`` loops, we
iterate over every subnet and every IP address and use an ``if`` iterate over every subnet and every IP address and use an ``if``
@ -714,7 +730,7 @@ Bro supports ``usec``, ``msec``, ``sec``, ``min``, ``hr``, or ``day`` which repr
microseconds, milliseconds, seconds, minutes, hours, and days microseconds, milliseconds, seconds, minutes, hours, and days
respectively. In fact, the interval data type allows for a surprising respectively. In fact, the interval data type allows for a surprising
amount of variation in its definitions. There can be a space between amount of variation in its definitions. There can be a space between
the numeric constant or they can crammed together like a temporal the numeric constant and the time unit, or they can be crammed together like a temporal
portmanteau. The time unit can be either singular or plural. All of portmanteau. The time unit can be either singular or plural. All of
this adds up to the fact that both ``42hrs`` and ``42 hr`` are this adds up to the fact that both ``42hrs`` and ``42 hr`` are
perfectly valid and logically equivalent in Bro. The point, however, perfectly valid and logically equivalent in Bro. The point, however,
@ -760,7 +776,7 @@ string against which it will be tested to be on the right.
In the sample above, two local variables are declared to hold our In the sample above, two local variables are declared to hold our
sample sentence and regular expression. Our regular expression in sample sentence and regular expression. Our regular expression in
this case will return true if the string contains either the word this case will return true if the string contains either the word
``quick`` or the word ``fox``. The ``if`` statement on line six uses ``quick`` or the word ``fox``. The ``if`` statement in the script uses
embedded matching and the ``in`` operator to check for the existence embedded matching and the ``in`` operator to check for the existence
of the pattern within the string. If the statement resolves to true, of the pattern within the string. If the statement resolves to true,
:bro:id:`split` is called to break the string into separate pieces. :bro:id:`split` is called to break the string into separate pieces.
@ -768,8 +784,8 @@ of the pattern within the string. If the statement resolves to true,
table of strings indexed by a count. Each element of the table will table of strings indexed by a count. Each element of the table will
be the segments before and after any matches against the pattern but be the segments before and after any matches against the pattern but
excluding the actual matches. In this case, our pattern matches excluding the actual matches. In this case, our pattern matches
twice, and results in a table with three entries. Lines 11 through 13 twice, and results in a table with three entries. The ``print`` statements
print the contents of the table in order. in the script will print the contents of the table in order.
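As a hedged sketch of what that result looks like (the segment boundaries shown in the comments are what we'd expect from the sentence and pattern used in the example, written out here by hand):

.. code:: bro

    local parts = split("The quick brown fox jumps over the lazy dog.", /quick|lazy/);
    # parts is a table[count] of string with three entries:
    #   parts[1] == "The "
    #   parts[2] == " brown fox jumps over the "
    #   parts[3] == " dog."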
.. btest:: data_type_pattern .. btest:: data_type_pattern
@ -780,7 +796,7 @@ inequality operators through the ``==`` and ``!=`` operators
respectively. When used in this manner however, the string must match respectively. When used in this manner however, the string must match
entirely to resolve to true. For example, the script below uses two entirely to resolve to true. For example, the script below uses two
ternary conditional statements to illustrate the use of the ``==`` ternary conditional statements to illustrate the use of the ``==``
operators with patterns. On lines 8 and 11 the output is altered based operator with patterns. The output is altered based
on the result of the comparison between the pattern and the string. on the result of the comparison between the pattern and the string.
.. btest-include:: ${DOC_ROOT}/scripting/data_type_pattern_02.bro .. btest-include:: ${DOC_ROOT}/scripting/data_type_pattern_02.bro
@ -803,7 +819,7 @@ with the ``typedef`` and ``struct`` keywords, Bro allows you to cobble
together new data types to suit the needs of your situation. together new data types to suit the needs of your situation.
When combined with the ``type`` keyword, ``record`` can generate a When combined with the ``type`` keyword, ``record`` can generate a
composite type. We have, in fact, already encountered a a complex composite type. We have, in fact, already encountered a complex
example of the ``record`` data type in the earlier sections, the example of the ``record`` data type in the earlier sections, the
:bro:type:`connection` record passed to many events. Another one, :bro:type:`connection` record passed to many events. Another one,
:bro:type:`Conn::Info`, which corresponds to the fields logged into :bro:type:`Conn::Info`, which corresponds to the fields logged into
@ -915,11 +931,7 @@ through a contrived example of simply logging the digits 1 through 10
and their corresponding factorial to the default ASCII log writer. and their corresponding factorial to the default ASCII log writer.
It's always best to work through the problem once, simulating the It's always best to work through the problem once, simulating the
desired output with ``print`` and ``fmt`` before attempting to dive desired output with ``print`` and ``fmt`` before attempting to dive
into the Logging Framework. Below is a script that defines a into the Logging Framework.
factorial function to recursively calculate the factorial of a
unsigned integer passed as an argument to the function. Using
``print`` and :bro:id:`fmt` we can ensure that Bro can perform these
calculations correctly as well get an idea of the answers ourselves.
.. btest-include:: ${DOC_ROOT}/scripting/framework_logging_factorial_01.bro .. btest-include:: ${DOC_ROOT}/scripting/framework_logging_factorial_01.bro
@ -927,19 +939,28 @@ calculations correctly as well get an idea of the answers ourselves.
@TEST-EXEC: btest-rst-cmd bro ${DOC_ROOT}/scripting/framework_logging_factorial_01.bro @TEST-EXEC: btest-rst-cmd bro ${DOC_ROOT}/scripting/framework_logging_factorial_01.bro
This script defines a factorial function to recursively calculate the
factorial of an unsigned integer passed as an argument to the function. Using
``print`` and :bro:id:`fmt` we can ensure that Bro can perform these
calculations correctly as well as get an idea of the answers ourselves.
The output of the script aligns with what we expect so now it's time The output of the script aligns with what we expect so now it's time
to integrate the Logging Framework. As mentioned above we have to to integrate the Logging Framework.
perform a few steps before we can issue the :bro:id:`Log::write`
method and produce a logfile. As we are working within a namespace .. btest-include:: ${DOC_ROOT}/scripting/framework_logging_factorial_02.bro
and informing an outside entity of workings and data internal to the
namespace, we use an ``export`` block. First we need to inform Bro As mentioned above we have to perform a few steps before we can
issue the :bro:id:`Log::write` method and produce a logfile.
As we are working within a namespace and informing an outside
entity of workings and data internal to the namespace, we use
an ``export`` block. First we need to inform Bro
that we are going to be adding another Log Stream by adding a value to that we are going to be adding another Log Stream by adding a value to
the :bro:type:`Log::ID` enumerable. In line 6 of the script, we append the the :bro:type:`Log::ID` enumerable. In this script, we append the
value ``LOG`` to the ``Log::ID`` enumerable, however due to this being in value ``LOG`` to the ``Log::ID`` enumerable, however due to this being in
an export block the value appended to ``Log::ID`` is actually an export block the value appended to ``Log::ID`` is actually
``Factor::Log``. Next, we need to define the name and value pairs ``Factor::Log``. Next, we need to define the name and value pairs
that make up the data of our logs and dictate its format. Lines 8 that make up the data of our logs and dictate its format. This script
through 11 define a new datatype called an ``Info`` record (actually, defines a new record datatype called ``Info`` (actually,
``Factor::Info``) with two fields, both unsigned integers. Each of the ``Factor::Info``) with two fields, both unsigned integers. Each of the
fields in the ``Factor::Log`` record type include the ``&log`` fields in the ``Factor::Log`` record type include the ``&log``
attribute, indicating that these fields should be passed to the attribute, indicating that these fields should be passed to the
@ -947,16 +968,14 @@ Logging Framework when ``Log::write`` is called. Were there to be
any name value pairs without the ``&log`` attribute, those fields any name value pairs without the ``&log`` attribute, those fields
would simply be ignored during logging but remain available for the would simply be ignored during logging but remain available for the
lifespan of the variable. The next step is to create the logging lifespan of the variable. The next step is to create the logging
stream with :bro:id:`Log::create_stream` which takes a Log::ID and a stream with :bro:id:`Log::create_stream` which takes a ``Log::ID`` and a
record as its arguments. In this example, on line 25, we call the record as its arguments. In this example, we call the
``Log::create_stream`` method and pass ``Factor::LOG`` and the ``Log::create_stream`` method and pass ``Factor::LOG`` and the
``Factor::Info`` record as arguments. From here on out, if we issue ``Factor::Info`` record as arguments. From here on out, if we issue
the ``Log::write`` command with the correct ``Log::ID`` and a properly the ``Log::write`` command with the correct ``Log::ID`` and a properly
formatted ``Factor::Info`` record, a log entry will be generated. formatted ``Factor::Info`` record, a log entry will be generated.
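For instance, a call along these lines produces one entry in ``factor.log`` (a sketch that assumes a count variable ``n`` is in scope; it is not a verbatim excerpt from the example script):

.. code:: bro

    Log::write(Factor::LOG, [$num=n, $factorial_num=factorial(n)]);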
.. btest-include:: ${DOC_ROOT}/scripting/framework_logging_factorial_02.bro Now, if we run this script, instead of generating
Now, if we run the new version of the script, instead of generating
logging information to stdout, no output is created. Instead the logging information to stdout, no output is created. Instead the
output is all in ``factor.log``, properly formatted and organized. output is all in ``factor.log``, properly formatted and organized.
@ -995,13 +1014,13 @@ remaining logs to factor.log.
:lines: 38-62 :lines: 38-62
:linenos: :linenos:
To dynamically alter the file in which a stream writes its logs a To dynamically alter the file in which a stream writes its logs, a
filter can specify function returns a string to be used as the filter can specify a function that returns a string to be used as the
filename for the current call to ``Log::write``. The definition for filename for the current call to ``Log::write``. The definition for
this function has to take as its parameters a ``Log::ID`` called id, a this function has to take as its parameters a ``Log::ID`` called id, a
string called ``path`` and the appropriate record type for the logs called string called ``path`` and the appropriate record type for the logs called
``rec``. You can see the definition of ``mod5`` used in this example on ``rec``. You can see that the definition of ``mod5`` used in this example
line one conforms to that requirement. The function simply returns conforms to that requirement. The function simply returns
``factor-mod5`` if the factorial is divisible evenly by 5, otherwise, it ``factor-mod5`` if the factorial is divisible evenly by 5, otherwise, it
returns ``factor-non5``. In the additional ``bro_init`` event returns ``factor-non5``. In the additional ``bro_init`` event
handler, we define a locally scoped ``Log::Filter`` and assign it a handler, we define a locally scoped ``Log::Filter`` and assign it a
@ -1074,7 +1093,8 @@ make a call to :bro:id:`NOTICE` supplying it with an appropriate
:bro:type:`Notice::Info` record. Often times the call to ``NOTICE`` :bro:type:`Notice::Info` record. Often times the call to ``NOTICE``
includes just the ``Notice::Type``, and a concise message. There are includes just the ``Notice::Type``, and a concise message. There are
however, significantly more options available when raising notices as however, significantly more options available when raising notices as
seen in the table below. The only field in the table below whose seen in the definition of :bro:type:`Notice::Info`. The only field in
``Notice::Info`` whose
attributes make it a required field is the ``note`` field. Still, attributes make it a required field is the ``note`` field. Still,
good manners are always important and including a concise message in good manners are always important and including a concise message in
``$msg`` and, where necessary, the contents of the connection record ``$msg`` and, where necessary, the contents of the connection record
@ -1086,57 +1106,6 @@ that are commonly included, ``$identifier`` and ``$suppress_for`` are
built around the automated suppression feature of the Notice Framework built around the automated suppression feature of the Notice Framework
which we will cover shortly. which we will cover shortly.
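As a minimal sketch of such a call (the notice type and message are illustrative only, and ``c`` is assumed to be a connection record in scope):

.. code:: bro

    NOTICE([$note=SSH::Interesting_Hostname_Login,
            $msg="example notice message",
            $conn=c]);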
.. todo::
Once the link to ``Notice::Info`` work I think we should take out
the table. That's too easy to get out of date.
+---------------------+------------------------------------------------------------------+----------------+----------------------------------------+
| Field | Type | Attributes | Use |
+=====================+==================================================================+================+========================================+
| ts | time | &log &optional | The time of the notice |
+---------------------+------------------------------------------------------------------+----------------+----------------------------------------+
| uid | string | &log &optional | A unique connection ID |
+---------------------+------------------------------------------------------------------+----------------+----------------------------------------+
| id | conn_id | &log &optional | A 4-tuple to identify endpoints |
+---------------------+------------------------------------------------------------------+----------------+----------------------------------------+
| conn | connection | &optional | Shorthand for the uid and id |
+---------------------+------------------------------------------------------------------+----------------+----------------------------------------+
| iconn | icmp_conn | &optional | Shorthand for the uid and id |
+---------------------+------------------------------------------------------------------+----------------+----------------------------------------+
| proto | transport_proto | &log &optional | Transport protocol |
+---------------------+------------------------------------------------------------------+----------------+----------------------------------------+
| note | Notice::Type | &log | The Notice::Type of the notice |
+---------------------+------------------------------------------------------------------+----------------+----------------------------------------+
| msg | string | &log &optional | Human readable message |
+---------------------+------------------------------------------------------------------+----------------+----------------------------------------+
| sub | string | &log &optional | Human readable message |
+---------------------+------------------------------------------------------------------+----------------+----------------------------------------+
| src | addr | &log &optional | Source address if no conn_id |
+---------------------+------------------------------------------------------------------+----------------+----------------------------------------+
| dst | addr | &log &optional | Destination addr if no conn_id |
+---------------------+------------------------------------------------------------------+----------------+----------------------------------------+
| p | port | &log &optional | Port if no conn_id |
+---------------------+------------------------------------------------------------------+----------------+----------------------------------------+
| n | count | &log &optional | Count or status code |
+---------------------+------------------------------------------------------------------+----------------+----------------------------------------+
| src_peer | event_peer | &log &optional | Peer that raised the notice |
+---------------------+------------------------------------------------------------------+----------------+----------------------------------------+
| peer_descr | string | &log &optional | Text description of the src_peer |
+---------------------+------------------------------------------------------------------+----------------+----------------------------------------+
| actions | set[Notice::Action] | &log &optional | Actions applied to the notice |
+---------------------+------------------------------------------------------------------+----------------+----------------------------------------+
| policy_items | set[count] | &log &optional | Policy items that have been applied |
+---------------------+------------------------------------------------------------------+----------------+----------------------------------------+
| email_body_sections | vector | &optional | Body of the email for email notices. |
+---------------------+------------------------------------------------------------------+----------------+----------------------------------------+
| email_delay_tokens | set[string] | &optional | Delay functionality for email notices. |
+---------------------+------------------------------------------------------------------+----------------+----------------------------------------+
| identifier | string | &optional | A unique string identifier |
+---------------------+------------------------------------------------------------------+----------------+----------------------------------------+
| suppress_for | interval | &log &optional | Length of time to suppress a notice. |
+---------------------+------------------------------------------------------------------+----------------+----------------------------------------+
One of the default policy scripts raises a notice when an SSH login One of the default policy scripts raises a notice when an SSH login
has been heuristically detected and the originating hostname is one has been heuristically detected and the originating hostname is one
that would raise suspicion. Effectively, the script attempts to that would raise suspicion. Effectively, the script attempts to
@ -1153,15 +1122,15 @@ possible while staying concise.
While much of the script relates to the actual detection, the parts While much of the script relates to the actual detection, the parts
specific to the Notice Framework are actually quite interesting in specific to the Notice Framework are actually quite interesting in
themselves. On line 18 the script's ``export`` block adds the value themselves. The script's ``export`` block adds the value
``SSH::Interesting_Hostname_Login`` to the enumerable constant ``SSH::Interesting_Hostname_Login`` to the enumerable constant
``Notice::Type`` to indicate to the Bro core that a new type of notice ``Notice::Type`` to indicate to the Bro core that a new type of notice
is being defined. The script then calls ``NOTICE`` and defines the is being defined. The script then calls ``NOTICE`` and defines the
``$note``, ``$msg``, ``$sub`` and ``$conn`` fields of the ``$note``, ``$msg``, ``$sub`` and ``$conn`` fields of the
:bro:type:`Notice::Info` record. Line 42 also includes a ternary if :bro:type:`Notice::Info` record. There are two ternary if
statement that modifies the ``$msg`` text depending on whether the statements that modify the ``$msg`` text depending on whether the
host is a local address and whether it is the client or the server. host is a local address and whether it is the client or the server.
This use of :bro:id:`fmt` and a ternary operators is a concise way to This use of :bro:id:`fmt` and ternary operators is a concise way to
lend readability to the notices that are generated without the need lend readability to the notices that are generated without the need
for branching ``if`` statements that each raise a specific notice. for branching ``if`` statements that each raise a specific notice.
@ -1222,7 +1191,7 @@ from the connection relative to the behavior that has been observed by
Bro. Bro.
.. btest-include:: ${BRO_SRC_ROOT}/scripts/policy/protocols/ssl/expiring-certs.bro .. btest-include:: ${BRO_SRC_ROOT}/scripts/policy/protocols/ssl/expiring-certs.bro
:lines: 60-63 :lines: 64-68
In the :doc:`/scripts/policy/protocols/ssl/expiring-certs.bro` script In the :doc:`/scripts/policy/protocols/ssl/expiring-certs.bro` script
which identifies when SSL certificates are set to expire and raises which identifies when SSL certificates are set to expire and raises
@ -1302,9 +1271,9 @@ in the call to ``NOTICE``.
.. btest-include:: ${DOC_ROOT}/scripting/framework_notice_shortcuts_01.bro .. btest-include:: ${DOC_ROOT}/scripting/framework_notice_shortcuts_01.bro
The Notice Policy shortcut above adds the ``Notice::Types`` of The Notice Policy shortcut above adds the ``Notice::Type`` of
SSH::Interesting_Hostname_Login and SSH::Login to the ``SSH::Interesting_Hostname_Login`` to the
Notice::emailed_types set while the shortcut below alters the length ``Notice::emailed_types`` set while the shortcut below alters the length
of time for which those notices will be suppressed. of time for which those notices will be suppressed.
.. btest-include:: ${DOC_ROOT}/scripting/framework_notice_shortcuts_02.bro .. btest-include:: ${DOC_ROOT}/scripting/framework_notice_shortcuts_02.bro
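Put together, the two shortcuts amount to a couple of ``redef`` lines; the one-day interval below is an arbitrary illustrative value, and mail is only actually sent if ``Notice::mail_dest`` is configured::

    @load base/frameworks/notice
    @load protocols/ssh/interesting-hostnames

    # Email these notices ...
    redef Notice::emailed_types += {
        SSH::Interesting_Hostname_Login,
    };

    # ... and suppress duplicates (same note and identifier) for a full
    # day instead of the default suppression interval.
    redef Notice::type_suppression_intervals += {
        [SSH::Interesting_Hostname_Login] = 1day,
    };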

1
magic

@ -1 +0,0 @@
Subproject commit 99c6b89230e2b9b0e781c42b0b9412d2ab4e14b2

5
man/CMakeLists.txt Normal file
View file

@ -0,0 +1,5 @@
install(DIRECTORY . DESTINATION ${BRO_MAN_INSTALL_PATH}/man8 FILES_MATCHING
PATTERN "*.8"
)

164
man/bro.8 Normal file
View file

@ -0,0 +1,164 @@
.TH BRO "8" "November 2014" "bro" "System Administration Utilities"
.SH NAME
bro \- passive network traffic analyzer
.SH SYNOPSIS
.B bro
\/\fP [\fIoptions\fR] [\fIfile\fR ...]
.SH DESCRIPTION
Bro is primarily a security monitor that inspects all traffic on a link in
depth for signs of suspicious activity. More generally, however, Bro
supports a wide range of traffic analysis tasks even outside of the
security domain, including performance measurements and helping with
trouble-shooting.
Bro comes with built-in functionality for a range of analysis and detection
tasks, including detecting malware by interfacing to external registries,
reporting vulnerable versions of software seen on the network, identifying
popular web applications, detecting SSH brute-forcing, validating SSL
certificate chains, among others.
.SH OPTIONS
.TP
.B <file>
policy file, or read stdin
.TP
\fB\-a\fR,\ \-\-parse\-only
exit immediately after parsing scripts
.TP
\fB\-b\fR,\ \-\-bare\-mode
don't load scripts from the base/ directory
.TP
\fB\-d\fR,\ \-\-debug\-policy
activate policy file debugging
.TP
\fB\-e\fR,\ \-\-exec <bro code>
augment loaded policies by given code
.TP
\fB\-f\fR,\ \-\-filter <filter>
tcpdump filter
.TP
\fB\-g\fR,\ \-\-dump\-config
dump current config into .state dir
.TP
\fB\-h\fR,\ \-\-help|\-?
command line help
.TP
\fB\-i\fR,\ \-\-iface <interface>
read from given interface
.TP
\fB\-p\fR,\ \-\-prefix <prefix>
add given prefix to policy file resolution
.TP
\fB\-r\fR,\ \-\-readfile <readfile>
read from given tcpdump file
.TP
\fB\-y\fR,\ \-\-flowfile <file>[=<ident>]
read from given flow file
.TP
\fB\-Y\fR,\ \-\-netflow <ip>:<prt>[=<id>]
read flow from socket
.TP
\fB\-s\fR,\ \-\-rulefile <rulefile>
read rules from given file
.TP
\fB\-t\fR,\ \-\-tracefile <tracefile>
activate execution tracing
.TP
\fB\-w\fR,\ \-\-writefile <writefile>
write to given tcpdump file
.TP
\fB\-v\fR,\ \-\-version
print version and exit
.TP
\fB\-x\fR,\ \-\-print\-state <file.bst>
print contents of state file
.TP
\fB\-z\fR,\ \-\-analyze <analysis>
run the specified policy file analysis
.TP
\fB\-C\fR,\ \-\-no\-checksums
ignore checksums
.TP
\fB\-D\fR,\ \-\-dfa\-size <size>
DFA state cache size
.TP
\fB\-F\fR,\ \-\-force\-dns
force DNS
.TP
\fB\-I\fR,\ \-\-print\-id <ID name>
print out given ID
.TP
\fB\-K\fR,\ \-\-md5\-hashkey <hashkey>
set key for MD5\-keyed hashing
.TP
\fB\-L\fR,\ \-\-rule\-benchmark
benchmark for rules
.TP
\fB\-N\fR,\ \-\-print\-plugins
print available plugins and exit (\fB\-NN\fR for verbose)
.TP
\fB\-O\fR,\ \-\-optimize
optimize policy script
.TP
\fB\-P\fR,\ \-\-prime\-dns
prime DNS
.TP
\fB\-Q\fR,\ \-\-time
print execution time summary to stderr
.TP
\fB\-R\fR,\ \-\-replay <events.bst>
replay events
.TP
\fB\-S\fR,\ \-\-debug\-rules
enable rule debugging
.TP
\fB\-T\fR,\ \-\-re\-level <level>
set 'RE_level' for rules
.TP
\fB\-U\fR,\ \-\-status\-file <file>
Record process status in file
.TP
\fB\-W\fR,\ \-\-watchdog
activate watchdog timer
.TP
\fB\-X\fR,\ \-\-broxygen
generate documentation based on config file
.TP
\fB\-\-pseudo\-realtime[=\fR<speedup>]
enable pseudo\-realtime for performance evaluation (default 1)
.TP
\fB\-\-load\-seeds\fR <file>
load seeds from given file
.TP
\fB\-\-save\-seeds\fR <file>
save seeds to given file
.SH ENVIRONMENT
.TP
.B BROPATH
file search path
.TP
.B BRO_PLUGIN_PATH
plugin search path
.TP
.B BRO_PLUGIN_ACTIVATE
plugins to always activate
.TP
.B BRO_PREFIXES
prefix list
.TP
.B BRO_DNS_FAKE
disable DNS lookups
.TP
.B BRO_SEED_FILE
file to load seeds from
.TP
.B BRO_LOG_SUFFIX
ASCII log file extension
.TP
.B BRO_PROFILER_FILE
Output file for script execution statistics
.TP
.B BRO_DISABLE_BROXYGEN
Disable Broxygen documentation support
.SH AUTHOR
.B bro
was written by The Bro Project <info@bro.org>.

View file

@ -16,6 +16,7 @@ the 'dpkg-dev' package, please install it first.
} }
prefix=/opt/bro prefix=/opt/bro
localstatedir=/var/opt/bro
# During the packaging process, `dpkg-shlibs` will fail if used on a library # During the packaging process, `dpkg-shlibs` will fail if used on a library
# that links to other internal/project libraries unless an RPATH is used or # that links to other internal/project libraries unless an RPATH is used or
@ -31,7 +32,7 @@ cd ..
( cd build && make package ) ( cd build && make package )
# Full Bro package # Full Bro package
./configure --prefix=${prefix} --pkg-name-prefix=Bro --binary-package ./configure --prefix=${prefix} --localstatedir=${localstatedir} --pkg-name-prefix=Bro --binary-package
( cd build && make package ) ( cd build && make package )
# Broccoli # Broccoli
@ -42,6 +43,6 @@ cd ../..
# Broctl # Broctl
cd aux/broctl cd aux/broctl
./configure --prefix=${prefix} --binary-package ./configure --prefix=${prefix} --localstatedir=${localstatedir} --binary-package
( cd build && make package && mv *.deb ../../../build/ ) ( cd build && make package && mv *.deb ../../../build/ )
cd ../.. cd ../..

View file

@ -15,6 +15,7 @@ the 'rpm-build' package, please install it first.
} }
prefix=/opt/bro prefix=/opt/bro
localstatedir=/var/opt/bro
cd .. cd ..
@ -24,7 +25,7 @@ cd ..
( cd build && make package ) ( cd build && make package )
# Full Bro package # Full Bro package
./configure --prefix=${prefix} --pkg-name-prefix=Bro --binary-package ./configure --prefix=${prefix} --localstatedir=${localstatedir} --pkg-name-prefix=Bro --binary-package
( cd build && make package ) ( cd build && make package )
# Broccoli # Broccoli
@ -35,6 +36,6 @@ cd ../..
# Broctl # Broctl
cd aux/broctl cd aux/broctl
./configure --prefix=${prefix} --binary-package ./configure --prefix=${prefix} --localstatedir=${localstatedir} --binary-package
( cd build && make package && mv *.rpm ../../../build/ ) ( cd build && make package && mv *.rpm ../../../build/ )
cd ../.. cd ../..

View file

@ -0,0 +1 @@
Support for X509 certificates with the file analysis framework.

View file

@ -0,0 +1 @@
@load ./main

View file

@ -0,0 +1,77 @@
@load base/frameworks/files
@load base/files/hash
module X509;
export {
redef enum Log::ID += { LOG };
type Info: record {
## Current timestamp.
ts: time &log;
## File id of this certificate.
id: string &log;
## Basic information about the certificate.
certificate: X509::Certificate &log;
## The opaque wrapping the certificate. Mainly used
## for the verify operations.
handle: opaque of x509;
## All extensions that were encountered in the certificate.
extensions: vector of X509::Extension &default=vector();
## Subject alternative name extension of the certificate.
san: X509::SubjectAlternativeName &optional &log;
## Basic constraints extension of the certificate.
basic_constraints: X509::BasicConstraints &optional &log;
};
## Event for accessing logged records.
global log_x509: event(rec: Info);
}
event bro_init() &priority=5
{
Log::create_stream(X509::LOG, [$columns=Info, $ev=log_x509]);
}
redef record Files::Info += {
## Information about X509 certificates. This is used to keep
## certificate information until all events have been received.
x509: X509::Info &optional;
};
event x509_certificate(f: fa_file, cert_ref: opaque of x509, cert: X509::Certificate) &priority=5
{
f$info$x509 = [$ts=f$info$ts, $id=f$id, $certificate=cert, $handle=cert_ref];
}
event x509_extension(f: fa_file, ext: X509::Extension) &priority=5
{
if ( f$info?$x509 )
f$info$x509$extensions[|f$info$x509$extensions|] = ext;
}
event x509_ext_basic_constraints(f: fa_file, ext: X509::BasicConstraints) &priority=5
{
if ( f$info?$x509 )
f$info$x509$basic_constraints = ext;
}
event x509_ext_subject_alternative_name(f: fa_file, ext: X509::SubjectAlternativeName) &priority=5
{
if ( f$info?$x509 )
f$info$x509$san = ext;
}
event file_state_remove(f: fa_file) &priority=5
{
if ( ! f$info?$x509 )
return;
Log::write(LOG, f$info$x509);
}
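As a quick illustration of the ``log_x509`` event declared above, a hypothetical consumer script might watch the stream for certificates that are about to expire (the 30-day window is arbitrary)::

    @load base/files/x509

    # Hypothetical consumer of the log_x509 event: flag certificates that
    # expire within the next 30 days.
    event X509::log_x509(rec: X509::Info)
        {
        if ( rec$certificate$not_valid_after - network_time() < 30days )
            print "certificate expiring soon:", rec$certificate$subject;
        }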

View file

@ -1 +1,2 @@
@load ./main.bro @load ./main.bro
@load ./magic

View file

@ -0,0 +1,2 @@
@load-sigs ./general
@load-sigs ./libmagic

View file

@ -0,0 +1,16 @@
# General purpose file magic signatures.
signature file-plaintext {
file-magic /([[:print:][:space:]]{10})/
file-mime "text/plain", -20
}
signature file-tar {
file-magic /([[:print:]\x00]){100}(([[:digit:]\x00\x20]){8}){3}/
file-mime "application/x-tar", 150
}
signature file-swf {
file-magic /(F|C|Z)WS/
file-mime "application/x-shockwave-flash", 60
}

File diff suppressed because it is too large Load diff

View file

@ -41,38 +41,39 @@ export {
## If this file was transferred over a network ## If this file was transferred over a network
## connection this should show the host or hosts that ## connection this should show the host or hosts that
## the data sourced from. ## the data sourced from.
tx_hosts: set[addr] &log; tx_hosts: set[addr] &default=addr_set() &log;
## If this file was transferred over a network ## If this file was transferred over a network
## connection this should show the host or hosts that ## connection this should show the host or hosts that
## the data traveled to. ## the data traveled to.
rx_hosts: set[addr] &log; rx_hosts: set[addr] &default=addr_set() &log;
## Connection UIDs over which the file was transferred. ## Connection UIDs over which the file was transferred.
conn_uids: set[string] &log; conn_uids: set[string] &default=string_set() &log;
## An identification of the source of the file data. E.g. it ## An identification of the source of the file data. E.g. it
## may be a network protocol over which it was transferred, or a ## may be a network protocol over which it was transferred, or a
## local file path which was read, or some other input source. ## local file path which was read, or some other input source.
source: string &log &optional; source: string &log &optional;
## A value to represent the depth of this file in relation ## A value to represent the depth of this file in relation
## to its source. In SMTP, it is the depth of the MIME ## to its source. In SMTP, it is the depth of the MIME
## attachment on the message. In HTTP, it is the depth of the ## attachment on the message. In HTTP, it is the depth of the
## request within the TCP connection. ## request within the TCP connection.
depth: count &default=0 &log; depth: count &default=0 &log;
## A set of analysis types done during the file analysis. ## A set of analysis types done during the file analysis.
analyzers: set[string] &log; analyzers: set[string] &default=string_set() &log;
## A mime type provided by libmagic against the *bof_buffer* ## A mime type provided by the strongest file magic signature
## field of :bro:see:`fa_file`, or in the cases where no ## match against the *bof_buffer* field of :bro:see:`fa_file`,
## buffering of the beginning of file occurs, an initial ## or in the cases where no buffering of the beginning of file
## guess of the mime type based on the first data seen. ## occurs, an initial guess of the mime type based on the first
## data seen.
mime_type: string &log &optional; mime_type: string &log &optional;
## A filename for the file if one is available from the source ## A filename for the file if one is available from the source
## for the file. These will frequently come from ## for the file. These will frequently come from
## "Content-Disposition" headers in network protocols. ## "Content-Disposition" headers in network protocols.
filename: string &log &optional; filename: string &log &optional;
@ -148,10 +149,19 @@ export {
## Returns: true if the analyzer will be added, or false if analysis ## Returns: true if the analyzer will be added, or false if analysis
## for the file isn't currently active or the *args* ## for the file isn't currently active or the *args*
## were invalid for the analyzer type. ## were invalid for the analyzer type.
global add_analyzer: function(f: fa_file, global add_analyzer: function(f: fa_file,
tag: Files::Tag, tag: Files::Tag,
args: AnalyzerArgs &default=AnalyzerArgs()): bool; args: AnalyzerArgs &default=AnalyzerArgs()): bool;
	## Adds all analyzers associated with a given MIME type to the analysis of
## a file. Note that analyzers added via MIME types cannot take further
## arguments.
##
## f: the file.
##
	## mtype: the MIME type; it is compared case-insensitively.
global add_analyzers_for_mime_type: function(f: fa_file, mtype: string);
## Removes an analyzer from the analysis of a given file. ## Removes an analyzer from the analysis of a given file.
## ##
## f: the file. ## f: the file.
@ -195,7 +205,7 @@ export {
## A callback to generate a file handle on demand when ## A callback to generate a file handle on demand when
## one is needed by the core. ## one is needed by the core.
get_file_handle: function(c: connection, is_orig: bool): string; get_file_handle: function(c: connection, is_orig: bool): string;
## A callback to "describe" a file. In the case of an HTTP ## A callback to "describe" a file. In the case of an HTTP
## transfer the most obvious description would be the URL. ## transfer the most obvious description would be the URL.
## It's like an extremely compressed version of the normal log. ## It's like an extremely compressed version of the normal log.
@ -206,7 +216,7 @@ export {
## Register callbacks for protocols that work with the Files framework. ## Register callbacks for protocols that work with the Files framework.
## The callbacks must uniquely identify a file and each protocol can ## The callbacks must uniquely identify a file and each protocol can
## only have a single callback registered for it. ## only have a single callback registered for it.
## ##
## tag: Tag for the protocol analyzer having a callback being registered. ## tag: Tag for the protocol analyzer having a callback being registered.
## ##
## reg: A :bro:see:`Files::ProtoRegistration` record. ## reg: A :bro:see:`Files::ProtoRegistration` record.
@ -224,6 +234,42 @@ export {
## callback: Function to execute when the given file analyzer is being added. ## callback: Function to execute when the given file analyzer is being added.
global register_analyzer_add_callback: function(tag: Files::Tag, callback: function(f: fa_file, args: AnalyzerArgs)); global register_analyzer_add_callback: function(tag: Files::Tag, callback: function(f: fa_file, args: AnalyzerArgs));
	## Registers a set of MIME types for an analyzer. If a future file with one of
	## these types is seen, the analyzer will be automatically assigned to parsing it.
## The function *adds* to all MIME types already registered, it doesn't replace
## them.
##
## tag: The tag of the analyzer.
##
## mts: The set of MIME types, each in the form "foo/bar" (case-insensitive).
##
## Returns: True if the MIME types were successfully registered.
global register_for_mime_types: function(tag: Analyzer::Tag, mts: set[string]) : bool;
## Registers a MIME type for an analyzer. If a future file with this type is seen,
## the analyzer will be automatically assigned to parsing it. The function *adds*
## to all MIME types already registered, it doesn't replace them.
##
## tag: The tag of the analyzer.
##
## mt: The MIME type in the form "foo/bar" (case-insensitive).
##
## Returns: True if the MIME type was successfully registered.
global register_for_mime_type: function(tag: Analyzer::Tag, mt: string) : bool;
## Returns a set of all MIME types currently registered for a specific analyzer.
##
## tag: The tag of the analyzer.
##
## Returns: The set of MIME types.
global registered_mime_types: function(tag: Analyzer::Tag) : set[string];
## Returns a table of all MIME-type-to-analyzer mappings currently registered.
##
## Returns: A table mapping each analyzer to the set of MIME types
## registered for it.
global all_registered_mime_types: function() : table[Analyzer::Tag] of set[string];
## Event that can be handled to access the Info record as it is sent on ## Event that can be handled to access the Info record as it is sent on
## to the logging framework. ## to the logging framework.
global log_files: event(rec: Info); global log_files: event(rec: Info);
@ -236,6 +282,9 @@ redef record fa_file += {
# Store the callbacks for protocol analyzers that have files. # Store the callbacks for protocol analyzers that have files.
global registered_protocols: table[Analyzer::Tag] of ProtoRegistration = table(); global registered_protocols: table[Analyzer::Tag] of ProtoRegistration = table();
# Store the MIME type to analyzer mappings.
global mime_types: table[Analyzer::Tag] of set[string];
global analyzer_add_callbacks: table[Files::Tag] of function(f: fa_file, args: AnalyzerArgs) = table(); global analyzer_add_callbacks: table[Files::Tag] of function(f: fa_file, args: AnalyzerArgs) = table();
event bro_init() &priority=5 event bro_init() &priority=5
@ -258,13 +307,13 @@ function set_info(f: fa_file)
f$info$source = f$source; f$info$source = f$source;
f$info$duration = f$last_active - f$info$ts; f$info$duration = f$last_active - f$info$ts;
f$info$seen_bytes = f$seen_bytes; f$info$seen_bytes = f$seen_bytes;
if ( f?$total_bytes ) if ( f?$total_bytes )
f$info$total_bytes = f$total_bytes; f$info$total_bytes = f$total_bytes;
f$info$missing_bytes = f$missing_bytes; f$info$missing_bytes = f$missing_bytes;
f$info$overflow_bytes = f$overflow_bytes; f$info$overflow_bytes = f$overflow_bytes;
if ( f?$is_orig ) if ( f?$is_orig )
f$info$is_orig = f$is_orig; f$info$is_orig = f$is_orig;
if ( f?$mime_type ) if ( f?$mime_type )
f$info$mime_type = f$mime_type; f$info$mime_type = f$mime_type;
} }
@ -288,6 +337,15 @@ function add_analyzer(f: fa_file, tag: Files::Tag, args: AnalyzerArgs): bool
return T; return T;
} }
function add_analyzers_for_mime_type(f: fa_file, mtype: string)
{
local dummy_args: AnalyzerArgs;
local analyzers = __add_analyzers_for_mime_type(f$id, mtype, dummy_args);
for ( tag in analyzers )
add f$info$analyzers[Files::analyzer_name(tag)];
}
function register_analyzer_add_callback(tag: Files::Tag, callback: function(f: fa_file, args: AnalyzerArgs)) function register_analyzer_add_callback(tag: Files::Tag, callback: function(f: fa_file, args: AnalyzerArgs))
{ {
analyzer_add_callbacks[tag] = callback; analyzer_add_callbacks[tag] = callback;
@ -311,6 +369,9 @@ function analyzer_name(tag: Files::Tag): string
event file_new(f: fa_file) &priority=10 event file_new(f: fa_file) &priority=10
{ {
set_info(f); set_info(f);
if ( f?$mime_type )
add_analyzers_for_mime_type(f, f$mime_type);
} }
event file_over_new_connection(f: fa_file, c: connection, is_orig: bool) &priority=10 event file_over_new_connection(f: fa_file, c: connection, is_orig: bool) &priority=10
@ -348,6 +409,41 @@ function register_protocol(tag: Analyzer::Tag, reg: ProtoRegistration): bool
return result; return result;
} }
function register_for_mime_types(tag: Analyzer::Tag, mime_types: set[string]) : bool
{
local rc = T;
for ( mt in mime_types )
{
if ( ! register_for_mime_type(tag, mt) )
rc = F;
}
return rc;
}
function register_for_mime_type(tag: Analyzer::Tag, mt: string) : bool
{
if ( ! __register_for_mime_type(tag, mt) )
return F;
if ( tag !in mime_types )
mime_types[tag] = set();
add mime_types[tag][mt];
return T;
}
function registered_mime_types(tag: Analyzer::Tag) : set[string]
{
return tag in mime_types ? mime_types[tag] : set();
}
function all_registered_mime_types(): table[Analyzer::Tag] of set[string]
{
return mime_types;
}
function describe(f: fa_file): string function describe(f: fa_file): string
{ {
local tag = Analyzer::get_tag(f$source); local tag = Analyzer::get_tag(f$source);
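A small sketch of the MIME-type registry API added above; the registration call at the end uses a placeholder analyzer tag and MIME type, so it is shown commented out::

    @load base/frameworks/files

    # Sketch: dump the MIME-type-to-analyzer registry once startup completes.
    event bro_init() &priority=-10
        {
        local reg = Files::all_registered_mime_types();
        for ( tag in reg )
            print tag, reg[tag];

        # Registering a mapping yourself looks like this; the tag below is
        # a placeholder, not a real analyzer:
        # Files::register_for_mime_type(Analyzer::ANALYZER_FOO, "application/x-example");
        }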

View file

@ -4,6 +4,17 @@
module Input; module Input;
export { export {
type Event: enum {
EVENT_NEW = 0,
EVENT_CHANGED = 1,
EVENT_REMOVED = 2,
};
type Mode: enum {
MANUAL = 0,
REREAD = 1,
STREAM = 2
};
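A minimal sketch of how these enums are used in practice, assuming a hypothetical ``hosts.txt`` input file whose header declares an ``ip`` column::

    @load base/frameworks/input

    module Example;

    type Idx: record {
        ip: addr;
    };

    global suspicious_hosts: set[addr] = set();

    event bro_init()
        {
        # REREAD re-processes the file whenever it changes on disk; an
        # optional $ev callback would additionally receive one of
        # EVENT_NEW, EVENT_CHANGED or EVENT_REMOVED per entry.
        Input::add_table([$source="hosts.txt", $name="suspicious_hosts",
                          $idx=Idx, $destination=suspicious_hosts,
                          $mode=Input::REREAD]);
        }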
## The default input reader used. Defaults to `READER_ASCII`. ## The default input reader used. Defaults to `READER_ASCII`.
const default_reader = READER_ASCII &redef; const default_reader = READER_ASCII &redef;

View file

@ -81,6 +81,9 @@ export {
## Where the data was discovered. ## Where the data was discovered.
where: Where &log; where: Where &log;
## The name of the node where the match was discovered.
node: string &optional &log;
## If the data was discovered within a connection, the ## If the data was discovered within a connection, the
## connection record should go here to give context to the data. ## connection record should go here to give context to the data.
conn: connection &optional; conn: connection &optional;
@ -240,6 +243,11 @@ function Intel::seen(s: Seen)
s$indicator_type = Intel::ADDR; s$indicator_type = Intel::ADDR;
} }
if ( ! s?$node )
{
s$node = peer_description;
}
if ( have_full_data ) if ( have_full_data )
{ {
local items = get_items(s); local items = get_items(s);

View file

@ -1,7 +1,5 @@
@load ./main @load ./main
@load ./postprocessors @load ./postprocessors
@load ./writers/ascii @load ./writers/ascii
@load ./writers/dataseries
@load ./writers/sqlite @load ./writers/sqlite
@load ./writers/elasticsearch
@load ./writers/none @load ./writers/none

View file

@ -5,9 +5,15 @@
module Log; module Log;
# Log::ID and Log::Writer are defined in types.bif due to circular dependencies.
export { export {
## Type that defines an ID unique to each log stream. Scripts creating new log
## streams need to redef this enum to add their own specific log ID. The log ID
## implicitly determines the default name of the generated log file.
type Log::ID: enum {
## Dummy place-holder.
UNKNOWN
};
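A minimal sketch of what the comment above describes, using a hypothetical ``Example`` module and log stream::

    @load base/frameworks/logging

    module Example;

    export {
        redef enum Log::ID += { LOG };

        type Info: record {
            ts: time &log;
            msg: string &log;
        };
    }

    event bro_init()
        {
        # The stream ID determines the file name: this ends up in example.log.
        Log::create_stream(Example::LOG, [$columns=Info]);
        Log::write(Example::LOG, [$ts=network_time(), $msg="stream created"]);
        }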
## If true, local logging is by default enabled for all filters. ## If true, local logging is by default enabled for all filters.
const enable_local_logging = T &redef; const enable_local_logging = T &redef;
@ -27,13 +33,13 @@ export {
const set_separator = "," &redef; const set_separator = "," &redef;
## String to use for empty fields. This should be different from ## String to use for empty fields. This should be different from
## *unset_field* to make the output unambiguous. ## *unset_field* to make the output unambiguous.
## Can be overwritten by individual writers. ## Can be overwritten by individual writers.
const empty_field = "(empty)" &redef; const empty_field = "(empty)" &redef;
## String to use for an unset &optional field. ## String to use for an unset &optional field.
## Can be overwritten by individual writers. ## Can be overwritten by individual writers.
const unset_field = "-" &redef; const unset_field = "-" &redef;
## Type defining the content of a logging stream. ## Type defining the content of a logging stream.
type Stream: record { type Stream: record {

View file

@ -26,20 +26,20 @@ export {
## This option is also available as a per-filter ``$config`` option. ## This option is also available as a per-filter ``$config`` option.
const use_json = F &redef; const use_json = F &redef;
## Format of timestamps when writing out JSON. By default, the JSON formatter will ## Format of timestamps when writing out JSON. By default, the JSON
## use double values for timestamps which represent the number of seconds from the ## formatter will use double values for timestamps which represent the
## UNIX epoch. ## number of seconds from the UNIX epoch.
const json_timestamps: JSON::TimestampFormat = JSON::TS_EPOCH &redef; const json_timestamps: JSON::TimestampFormat = JSON::TS_EPOCH &redef;
## If true, include lines with log meta information such as column names ## If true, include lines with log meta information such as column names
## with types, the values of ASCII logging options that are in use, and ## with types, the values of ASCII logging options that are in use, and
## the time when the file was opened and closed (the latter at the end). ## the time when the file was opened and closed (the latter at the end).
## ##
## If writing in JSON format, this is implicitly disabled. ## If writing in JSON format, this is implicitly disabled.
const include_meta = T &redef; const include_meta = T &redef;
## Prefix for lines with meta information. ## Prefix for lines with meta information.
## ##
## This option is also available as a per-filter ``$config`` option. ## This option is also available as a per-filter ``$config`` option.
const meta_prefix = "#" &redef; const meta_prefix = "#" &redef;
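For reference, switching the ASCII writer to JSON output with ISO 8601 timestamps is a two-line redef (assuming ``JSON::TS_ISO8601`` is among the available ``JSON::TimestampFormat`` values)::

    # Emit every ASCII-writer log as JSON, one object per line ...
    redef LogAscii::use_json = T;

    # ... and render timestamps as ISO 8601 strings rather than epoch doubles.
    redef LogAscii::json_timestamps = JSON::TS_ISO8601;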

View file

@ -1,60 +0,0 @@
##! Interface for the DataSeries log writer.
module LogDataSeries;
export {
## Compression to use with the DS output file. Options are:
##
## 'none' -- No compression.
## 'lzf' -- LZF compression (very quick, but leads to larger output files).
## 'lzo' -- LZO compression (very fast decompression times).
## 'gz' -- GZIP compression (slower than LZF, but also produces smaller output).
## 'bz2' -- BZIP2 compression (slower than GZIP, but also produces smaller output).
const compression = "gz" &redef;
## The extent buffer size.
## Larger values here lead to better compression and more efficient writes,
## but also increase the lag between the time events are received and
## the time they are actually written to disk.
const extent_size = 65536 &redef;
## Should we dump the XML schema we use for this DS file to disk?
## If yes, the XML schema shares the name of the logfile, but has
## an XML ending.
const dump_schema = F &redef;
## How many threads should DataSeries spawn to perform compression?
## Note that this dictates the number of threads per log stream. If
## you're using a lot of streams, you may want to keep this number
## relatively small.
##
## Default value is 1, which will spawn one thread / stream.
##
## Maximum is 128, minimum is 1.
const num_threads = 1 &redef;
## Should time be stored as an integer or a double?
## Storing time as a double leads to possible precision issues and
## can (significantly) increase the size of the resulting DS log.
## That said, timestamps stored in double form are consistent
## with the rest of Bro, including the standard ASCII log. Hence, we
## use them by default.
const use_integer_for_time = F &redef;
}
# Default function to postprocess a rotated DataSeries log file. It moves the
# rotated file to a new name that includes a timestamp with the opening time,
# and then runs the writer's default postprocessor command on it.
function default_rotation_postprocessor_func(info: Log::RotationInfo) : bool
{
# Move file to name including both opening and closing time.
local dst = fmt("%s.%s.ds", info$path,
strftime(Log::default_rotation_date_format, info$open));
system(fmt("/bin/mv %s %s", info$fname, dst));
# Run default postprocessor.
return Log::run_rotation_postprocessor_cmd(info, dst);
}
redef Log::default_rotation_postprocessors += { [Log::WRITER_DATASERIES] = default_rotation_postprocessor_func };

View file

@ -1,48 +0,0 @@
##! Log writer for sending logs to an ElasticSearch server.
##!
##! Note: This module is in testing and is not yet considered stable!
##!
##! There is one known memory issue. If your elasticsearch server is
##! running slowly and taking too long to return from bulk insert
##! requests, the message queue to the writer thread will continue
##! growing larger and larger giving the appearance of a memory leak.
module LogElasticSearch;
export {
## Name of the ES cluster.
const cluster_name = "elasticsearch" &redef;
## ES server.
const server_host = "127.0.0.1" &redef;
## ES port.
const server_port = 9200 &redef;
## Name of the ES index.
const index_prefix = "bro" &redef;
## The ES type prefix comes before the name of the related log.
## e.g. prefix = "bro\_" would create types of bro_dns, bro_software, etc.
const type_prefix = "" &redef;
## The time before an ElasticSearch transfer will timeout. Note that
## the fractional part of the timeout will be ignored. In particular,
## time specifications less than a second result in a timeout value of
## 0, which means "no timeout."
const transfer_timeout = 2secs;
## The batch size is the number of messages that will be queued up before
## they are sent to be bulk indexed.
const max_batch_size = 1000 &redef;
## The maximum amount of wall-clock time that is allowed to pass without
## finishing a bulk log send. This represents the maximum delay you
## would like to have with your logs before they are sent to ElasticSearch.
const max_batch_interval = 1min &redef;
## The maximum byte size for a buffered JSON string to send to the bulk
## insert API.
const max_byte_size = 1024 * 1024 &redef;
}

View file

@ -20,7 +20,8 @@ export {
## category along with the specific notice separating words with ## category along with the specific notice separating words with
## underscores and using leading capitals on each word except for ## underscores and using leading capitals on each word except for
## abbreviations which are kept in all capitals. For example, ## abbreviations which are kept in all capitals. For example,
## SSH::Login is for heuristically guessed successful SSH logins. ## SSH::Password_Guessing is for hosts that have crossed a threshold of
## heuristically determined failed SSH logins.
type Type: enum { type Type: enum {
## Notice reporting a count of how often a notice occurred. ## Notice reporting a count of how often a notice occurred.
Tally, Tally,

View file

@ -185,6 +185,7 @@ export {
["RPC_underflow"] = ACTION_LOG, ["RPC_underflow"] = ACTION_LOG,
["RST_storm"] = ACTION_LOG, ["RST_storm"] = ACTION_LOG,
["RST_with_data"] = ACTION_LOG, ["RST_with_data"] = ACTION_LOG,
["SSL_many_server_names"] = ACTION_LOG,
["simultaneous_open"] = ACTION_LOG_PER_CONN, ["simultaneous_open"] = ACTION_LOG_PER_CONN,
["spontaneous_FIN"] = ACTION_IGNORE, ["spontaneous_FIN"] = ACTION_IGNORE,
["spontaneous_RST"] = ACTION_IGNORE, ["spontaneous_RST"] = ACTION_IGNORE,

View file

@ -70,6 +70,9 @@ export {
## The network time at which a signature matching type of event ## The network time at which a signature matching type of event
## to be logged has occurred. ## to be logged has occurred.
ts: time &log; ts: time &log;
## A unique identifier of the connection which triggered the
## signature match event.
uid: string &log &optional;
## The host which triggered the signature match event. ## The host which triggered the signature match event.
src_addr: addr &log &optional; src_addr: addr &log &optional;
## The host port on which the signature-matching activity ## The host port on which the signature-matching activity
@ -167,7 +170,7 @@ event signature_match(state: signature_state, msg: string, data: string)
# Trim the matched data down to something reasonable # Trim the matched data down to something reasonable
if ( |data| > 140 ) if ( |data| > 140 )
data = fmt("%s...", sub_bytes(data, 0, 140)); data = fmt("%s...", sub_bytes(data, 0, 140));
local src_addr: addr; local src_addr: addr;
local src_port: port; local src_port: port;
local dst_addr: addr; local dst_addr: addr;
@ -192,6 +195,7 @@ event signature_match(state: signature_state, msg: string, data: string)
{ {
local info: Info = [$ts=network_time(), local info: Info = [$ts=network_time(),
$note=Sensitive_Signature, $note=Sensitive_Signature,
$uid=state$conn$uid,
$src_addr=src_addr, $src_addr=src_addr,
$src_port=src_port, $src_port=src_port,
$dst_addr=dst_addr, $dst_addr=dst_addr,
@ -212,11 +216,11 @@ event signature_match(state: signature_state, msg: string, data: string)
if ( ++count_per_resp[dst,sig_id] in count_thresholds ) if ( ++count_per_resp[dst,sig_id] in count_thresholds )
{ {
NOTICE([$note=Count_Signature, $conn=state$conn, NOTICE([$note=Count_Signature, $conn=state$conn,
$msg=msg, $msg=msg,
$n=count_per_resp[dst,sig_id], $n=count_per_resp[dst,sig_id],
$sub=fmt("%d matches of signature %s on host %s", $sub=fmt("%d matches of signature %s on host %s",
count_per_resp[dst,sig_id], count_per_resp[dst,sig_id],
sig_id, dst)]); sig_id, dst)]);
} }
} }
@ -290,16 +294,16 @@ event signature_match(state: signature_state, msg: string, data: string)
orig, vcount, resp); orig, vcount, resp);
Log::write(Signatures::LOG, Log::write(Signatures::LOG,
[$ts=network_time(), [$ts=network_time(),
$note=Multiple_Signatures, $note=Multiple_Signatures,
$src_addr=orig, $src_addr=orig,
$dst_addr=resp, $sig_id=sig_id, $sig_count=vcount, $dst_addr=resp, $sig_id=sig_id, $sig_count=vcount,
$event_msg=fmt("%s different signatures triggered", vcount), $event_msg=fmt("%s different signatures triggered", vcount),
$sub_msg=vert_scan_msg]); $sub_msg=vert_scan_msg]);
NOTICE([$note=Multiple_Signatures, $src=orig, $dst=resp, NOTICE([$note=Multiple_Signatures, $src=orig, $dst=resp,
$msg=fmt("%s different signatures triggered", vcount), $msg=fmt("%s different signatures triggered", vcount),
$n=vcount, $sub=vert_scan_msg]); $n=vcount, $sub=vert_scan_msg]);
last_vthresh[orig] = vcount; last_vthresh[orig] = vcount;
} }

View file

@ -287,6 +287,13 @@ function parse_mozilla(unparsed_version: string): Description
if ( 2 in parts ) if ( 2 in parts )
v = parse(parts[2])$version; v = parse(parts[2])$version;
} }
else if ( / Java\/[0-9]\./ in unparsed_version )
{
software_name = "Java";
parts = split_all(unparsed_version, /Java\/[0-9\._]*/);
if ( 2 in parts )
v = parse(parts[2])$version;
}
return [$version=v, $unparsed_version=unparsed_version, $name=software_name]; return [$version=v, $unparsed_version=unparsed_version, $name=software_name];
} }

View file

@ -28,10 +28,6 @@ export {
## values for a sumstat. ## values for a sumstat.
global cluster_ss_request: event(uid: string, ss_name: string, cleanup: bool); global cluster_ss_request: event(uid: string, ss_name: string, cleanup: bool);
# Event sent by nodes that are collecting sumstats after receiving a
# request for the sumstat from the manager.
#global cluster_ss_response: event(uid: string, ss_name: string, data: ResultTable, done: bool, cleanup: bool);
## This event is sent by the manager in a cluster to initiate the ## This event is sent by the manager in a cluster to initiate the
## collection of a single key value from a sumstat. It's typically used ## collection of a single key value from a sumstat. It's typically used
## to get intermediate updates before the break interval triggers to ## to get intermediate updates before the break interval triggers to
@ -62,7 +58,7 @@ export {
# Add events to the cluster framework to make this work. # Add events to the cluster framework to make this work.
redef Cluster::manager2worker_events += /SumStats::cluster_(ss_request|get_result|threshold_crossed)/; redef Cluster::manager2worker_events += /SumStats::cluster_(ss_request|get_result|threshold_crossed)/;
redef Cluster::manager2worker_events += /SumStats::(get_a_key)/; redef Cluster::manager2worker_events += /SumStats::(get_a_key)/;
redef Cluster::worker2manager_events += /SumStats::cluster_(ss_response|send_result|key_intermediate_response)/; redef Cluster::worker2manager_events += /SumStats::cluster_(send_result|key_intermediate_response)/;
redef Cluster::worker2manager_events += /SumStats::(send_a_key|send_no_key)/; redef Cluster::worker2manager_events += /SumStats::(send_a_key|send_no_key)/;
@if ( Cluster::local_node_type() != Cluster::MANAGER ) @if ( Cluster::local_node_type() != Cluster::MANAGER )
@ -74,7 +70,7 @@ global recent_global_view_keys: table[string, Key] of count &create_expire=1min
# Result tables indexed on a uid that are currently being sent to the # Result tables indexed on a uid that are currently being sent to the
# manager. # manager.
global sending_results: table[string] of ResultTable = table() &create_expire=1min; global sending_results: table[string] of ResultTable = table() &read_expire=1min;
# This is done on all non-manager node types in the event that a sumstat is # This is done on all non-manager node types in the event that a sumstat is
# being collected somewhere other than a worker. # being collected somewhere other than a worker.
@ -144,7 +140,7 @@ event SumStats::cluster_ss_request(uid: string, ss_name: string, cleanup: bool)
sending_results[uid] = (ss_name in result_store) ? result_store[ss_name] : table(); sending_results[uid] = (ss_name in result_store) ? result_store[ss_name] : table();
# Lookup the actual sumstats and reset it, the reference to the data # Lookup the actual sumstats and reset it, the reference to the data
# currently stored will be maintained internally from the # currently stored will be maintained internally from the
# sending_results table. # sending_results table.
if ( cleanup && ss_name in stats_store ) if ( cleanup && ss_name in stats_store )
reset(stats_store[ss_name]); reset(stats_store[ss_name]);
@ -159,7 +155,7 @@ event SumStats::cluster_get_result(uid: string, ss_name: string, key: Key, clean
if ( uid in sending_results && key in sending_results[uid] ) if ( uid in sending_results && key in sending_results[uid] )
{ {
# Note: copy is needed to compensate serialization caching issue. This should be # Note: copy is needed to compensate serialization caching issue. This should be
# changed to something else later. # changed to something else later.
event SumStats::cluster_send_result(uid, ss_name, key, copy(sending_results[uid][key]), cleanup); event SumStats::cluster_send_result(uid, ss_name, key, copy(sending_results[uid][key]), cleanup);
delete sending_results[uid][key]; delete sending_results[uid][key];
} }
@ -170,12 +166,12 @@ event SumStats::cluster_get_result(uid: string, ss_name: string, key: Key, clean
event SumStats::cluster_send_result(uid, ss_name, key, table(), cleanup); event SumStats::cluster_send_result(uid, ss_name, key, table(), cleanup);
} }
} }
else else
{ {
if ( ss_name in result_store && key in result_store[ss_name] ) if ( ss_name in result_store && key in result_store[ss_name] )
{ {
# Note: copy is needed to compensate serialization caching issue. This should be # Note: copy is needed to compensate serialization caching issue. This should be
# changed to something else later. # changed to something else later.
event SumStats::cluster_send_result(uid, ss_name, key, copy(result_store[ss_name][key]), cleanup); event SumStats::cluster_send_result(uid, ss_name, key, copy(result_store[ss_name][key]), cleanup);
} }
else else
@ -195,6 +191,19 @@ event SumStats::cluster_threshold_crossed(ss_name: string, key: SumStats::Key, t
threshold_tracker[ss_name][key] = thold_index; threshold_tracker[ss_name][key] = thold_index;
} }
# request_key is a no-op on the workers.
# It should only be called by the manager. Since we usually run the same scripts on the
# workers and the manager, it may end up being called on a worker too, so we just ignore it here.
#
# There is a small chance that people will try running it on events that are just thrown on the workers.
# This does not work at the moment and we cannot throw an error message, because we cannot distinguish it
# from the "script is running it everywhere" case. But - people should notice that they do not get results.
# Not entirely pretty, sorry :(
function request_key(ss_name: string, key: Key): Result
{
return Result();
}
@endif @endif
@ -203,7 +212,7 @@ event SumStats::cluster_threshold_crossed(ss_name: string, key: SumStats::Key, t
# This variable is maintained by manager nodes as they collect and aggregate # This variable is maintained by manager nodes as they collect and aggregate
# results. # results.
# Index on a uid. # Index on a uid.
global stats_keys: table[string] of set[Key] &create_expire=1min global stats_keys: table[string] of set[Key] &read_expire=1min
&expire_func=function(s: table[string] of set[Key], idx: string): interval &expire_func=function(s: table[string] of set[Key], idx: string): interval
{ {
Reporter::warning(fmt("SumStat key request for the %s SumStat uid took longer than 1 minute and was automatically cancelled.", idx)); Reporter::warning(fmt("SumStat key request for the %s SumStat uid took longer than 1 minute and was automatically cancelled.", idx));
@ -215,17 +224,16 @@ global stats_keys: table[string] of set[Key] &create_expire=1min
# matches the number of peer nodes that results should be coming from, the # matches the number of peer nodes that results should be coming from, the
# result is written out and deleted from here. # result is written out and deleted from here.
# Indexed on a uid. # Indexed on a uid.
# TODO: add an &expire_func in case not all results are received. global done_with: table[string] of count &read_expire=1min &default=0;
global done_with: table[string] of count &create_expire=1min &default=0;
# This variable is maintained by managers to track intermediate responses as # This variable is maintained by managers to track intermediate responses as
# they are getting a global view for a certain key. # they are getting a global view for a certain key.
# Indexed on a uid. # Indexed on a uid.
global key_requests: table[string] of Result &create_expire=1min; global key_requests: table[string] of Result &read_expire=1min;
# Store uids for dynamic requests here to avoid cleanup on the uid. # Store uids for dynamic requests here to avoid cleanup on the uid.
# (This needs to be done differently!) # (This needs to be done differently!)
global dynamic_requests: set[string] &create_expire=1min; global dynamic_requests: set[string] &read_expire=1min;
# This variable is maintained by managers to prevent overwhelming communication due # This variable is maintained by managers to prevent overwhelming communication due
# to too many intermediate updates. Each sumstat is tracked separately so that # to too many intermediate updates. Each sumstat is tracked separately so that
@ -414,7 +422,7 @@ event SumStats::cluster_send_result(uid: string, ss_name: string, key: Key, resu
# Mark that a worker is done. # Mark that a worker is done.
if ( uid !in done_with ) if ( uid !in done_with )
done_with[uid] = 0; done_with[uid] = 0;
#print fmt("MANAGER: got a result for %s %s from %s", uid, key, get_event_peer()$descr); #print fmt("MANAGER: got a result for %s %s from %s", uid, key, get_event_peer()$descr);
++done_with[uid]; ++done_with[uid];

View file

@ -54,6 +54,13 @@ type any_vec: vector of any;
## directly and then remove this alias. ## directly and then remove this alias.
type string_vec: vector of string; type string_vec: vector of string;
## A vector of x509 opaques.
##
## .. todo:: We need this type definition only for declaring builtin functions
## via ``bifcl``. We should extend ``bifcl`` to understand composite types
## directly and then remove this alias.
type x509_opaque_vector: vector of opaque of x509;
## A vector of addresses. ## A vector of addresses.
## ##
## .. todo:: We need this type definition only for declaring builtin functions ## .. todo:: We need this type definition only for declaring builtin functions
@ -68,6 +75,30 @@ type addr_vec: vector of addr;
## directly and then remove this alias. ## directly and then remove this alias.
type table_string_of_string: table[string] of string; type table_string_of_string: table[string] of string;
## A set of file analyzer tags.
##
## .. todo:: We need this type definition only for declaring builtin functions
## via ``bifcl``. We should extend ``bifcl`` to understand composite types
## directly and then remove this alias.
type files_tag_set: set[Files::Tag];
## A structure indicating a MIME type and strength of a match against
## file magic signatures.
##
## :bro:see:`file_magic`
type mime_match: record {
strength: int; ##< How strongly the signature matched. Used for
##< prioritization when multiple file magic signatures
##< match.
mime: string; ##< The MIME type of the file magic signature match.
};
## A vector of file magic signature matches, ordered by strength of
## the signature, strongest first.
##
## :bro:see:`file_magic`
type mime_matches: vector of mime_match;
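The :bro:see:`file_magic` function mentioned above returns these matches ordered by strength; a small sketch, using an arbitrary EXE-style byte string purely for illustration::

    event bro_init()
        {
        # Run the loaded file magic signatures over a small buffer and
        # print every candidate MIME type together with its match strength.
        local matches = file_magic("MZ\x90\x00\x03\x00\x00\x00");
        for ( i in matches )
            print matches[i]$mime, matches[i]$strength;
        }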
## A connection's transport-layer protocol. Note that Bro uses the term ## A connection's transport-layer protocol. Note that Bro uses the term
## "connection" broadly, using flow semantics for ICMP and UDP. ## "connection" broadly, using flow semantics for ICMP and UDP.
type transport_proto: enum { type transport_proto: enum {
@ -379,10 +410,15 @@ type fa_file: record {
## This is also the buffer that's used for file/mime type detection. ## This is also the buffer that's used for file/mime type detection.
bof_buffer: string &optional; bof_buffer: string &optional;
## A mime type provided by libmagic against the *bof_buffer*, or ## The mime type of the strongest file magic signature matches against
## in the cases where no buffering of the beginning of file occurs, ## the data chunk in *bof_buffer*, or in the cases where no buffering
## an initial guess of the mime type based on the first data seen. ## of the beginning of file occurs, an initial guess of the mime type
## based on the first data seen.
mime_type: string &optional; mime_type: string &optional;
## All mime types that matched file magic signatures against the data
## chunk in *bof_buffer*, in order of their strength value.
mime_types: mime_matches &optional;
} &redef; } &redef;
## Fields of a SYN packet. ## Fields of a SYN packet.
@ -2421,29 +2457,6 @@ global dns_skip_all_addl = T &redef;
## traffic and do not process it. Set to 0 to turn off this functionality. ## traffic and do not process it. Set to 0 to turn off this functionality.
global dns_max_queries = 5; global dns_max_queries = 5;
## An X509 certificate.
##
## .. bro:see:: x509_certificate
type X509: record {
version: count; ##< Version number.
serial: string; ##< Serial number.
subject: string; ##< Subject.
issuer: string; ##< Issuer.
not_valid_before: time; ##< Timestamp before when certificate is not valid.
not_valid_after: time; ##< Timestamp after when certificate is not valid.
};
## An X509 extension.
##
## .. bro:see:: x509_extension
type X509_extension_info: record {
name: string; ##< Long name of extension; oid if name not known.
short_name: string &optional; ##< Short name of extension if known.
oid: string; ##< Oid of extension.
critical: bool; ##< True if extension is critical.
value: string; ##< Extension content parsed to string for known extensions. Raw data otherwise.
};
## HTTP session statistics. ## HTTP session statistics.
## ##
## .. bro:see:: http_stats ## .. bro:see:: http_stats
@ -2472,8 +2485,7 @@ type http_message_stat: record {
header_length: count; header_length: count;
}; };
## Maximum number of HTTP entity data delivered to events. The amount of data ## Maximum number of HTTP entity data delivered to events.
## can be limited for better performance, zero disables truncation.
## ##
## .. bro:see:: http_entity_data skip_http_entity_data skip_http_data ## .. bro:see:: http_entity_data skip_http_entity_data skip_http_data
global http_entity_data_delivery_size = 1500 &redef; global http_entity_data_delivery_size = 1500 &redef;
@ -2725,6 +2737,7 @@ type ModbusRegisters: vector of count;
type ModbusHeaders: record { type ModbusHeaders: record {
tid: count; tid: count;
pid: count; pid: count;
len: count;
uid: count; uid: count;
function_code: count; function_code: count;
}; };
@ -2765,6 +2778,55 @@ export {
}; };
} }
module X509;
export {
type Certificate: record {
version: count; ##< Version number.
serial: string; ##< Serial number.
subject: string; ##< Subject.
issuer: string; ##< Issuer.
not_valid_before: time; ##< Timestamp before when certificate is not valid.
not_valid_after: time; ##< Timestamp after when certificate is not valid.
key_alg: string; ##< Name of the key algorithm.
sig_alg: string; ##< Name of the signature algorithm.
key_type: string &optional; ##< Key type, if the key is parseable by OpenSSL (rsa, dsa or ec).
key_length: count &optional; ##< Key length, in bits.
exponent: string &optional; ##< Exponent, if an RSA certificate.
curve: string &optional; ##< Curve, if an EC certificate.
} &log;
type Extension: record {
name: string; ##< Long name of the extension; OID if the name is not known.
short_name: string &optional; ##< Short name of the extension, if known.
oid: string; ##< OID of the extension.
critical: bool; ##< True if the extension is critical.
value: string; ##< Extension content parsed to a string for known extensions; raw data otherwise.
};
type BasicConstraints: record {
ca: bool; ##< CA flag set?
path_len: count &optional; ##< Maximum path length
} &log;
type SubjectAlternativeName: record {
dns: string_vec &optional &log; ##< List of DNS entries in SAN
uri: string_vec &optional &log; ##< List of URI entries in SAN
email: string_vec &optional &log; ##< List of email entries in SAN
ip: addr_vec &optional &log; ##< List of IP entries in SAN
other_fields: bool; ##< True if the certificate contained other name fields that were not recognized or parsed.
};
## Result of an X509 certificate chain verification
type Result: record {
## OpenSSL result code
result: int;
## Result as string
result_string: string;
## References to the final certificate chain, if verification was successful. The end-host certificate comes first.
chain_certs: vector of opaque of x509 &optional;
};
}
module SOCKS; module SOCKS;
export { export {
## This record is for a SOCKS client or server to provide either a ## This record is for a SOCKS client or server to provide either a
@ -2774,6 +2836,148 @@ export {
name: string &optional; name: string &optional;
} &log; } &log;
} }
module RADIUS;
export {
type RADIUS::AttributeList: vector of string;
type RADIUS::Attributes: table[count] of RADIUS::AttributeList;
type RADIUS::Message: record {
## The type of message (Access-Request, Access-Accept, etc.).
code : count;
## The transaction ID.
trans_id : count;
## The "authenticator" string.
authenticator : string;
## Any attributes.
attributes : RADIUS::Attributes &optional;
};
}
module GLOBAL;
@load base/bif/plugins/Bro_SNMP.types.bif
module SNMP;
export {
## The top-level message data structure of an SNMPv1 datagram, not
## including the PDU data. See :rfc:`1157`.
type SNMP::HeaderV1: record {
community: string;
};
## The top-level message data structure of an SNMPv2 datagram, not
## including the PDU data. See :rfc:`1901`.
type SNMP::HeaderV2: record {
community: string;
};
## The ``ScopedPduData`` data structure of an SNMPv3 datagram, not
## including the PDU data (i.e. just the "context" fields).
## See :rfc:`3412`.
type SNMP::ScopedPDU_Context: record {
engine_id: string;
name: string;
};
## The top-level message data structure of an SNMPv3 datagram, not
## including the PDU data. See :rfc:`3412`.
type SNMP::HeaderV3: record {
id: count;
max_size: count;
flags: count;
auth_flag: bool;
priv_flag: bool;
reportable_flag: bool;
security_model: count;
security_params: string;
pdu_context: SNMP::ScopedPDU_Context &optional;
};
## A generic SNMP header data structure that may include data from
## any version of SNMP. The value of the ``version`` field
## determines what header field is initialized.
type SNMP::Header: record {
version: count;
v1: SNMP::HeaderV1 &optional; ##< Set when ``version`` is 0.
v2: SNMP::HeaderV2 &optional; ##< Set when ``version`` is 1.
v3: SNMP::HeaderV3 &optional; ##< Set when ``version`` is 3.
};
## A generic SNMP object value, that may include any of the
## valid ``ObjectSyntax`` values from :rfc:`1155` or :rfc:`3416`.
## The value is decoded whenever possible and assigned to
## the appropriate field, which can be determined from the value
## of the ``tag`` field. For tags that can't be mapped to an
## appropriate type, the ``octets`` field holds the BER encoded
## ASN.1 content if there is any (though ``octets`` may also
## be used for other tags such as OCTET STRINGS or Opaque). Null
## values will only have their corresponding tag value set.
type SNMP::ObjectValue: record {
tag: count;
oid: string &optional;
signed: int &optional;
unsigned: count &optional;
address: addr &optional;
octets: string &optional;
};
# These aren't an enum because it's easier to type the fields as count.
# That way we don't have to deal with type conversion, and it doesn't
# mislead anyone into thinking these are the only valid tag values (it's
# just the set of known tags).
const SNMP::OBJ_INTEGER_TAG : count = 0x02; ##< Signed 64-bit integer.
const SNMP::OBJ_OCTETSTRING_TAG : count = 0x04; ##< An octet string.
const SNMP::OBJ_UNSPECIFIED_TAG : count = 0x05; ##< A NULL value.
const SNMP::OBJ_OID_TAG : count = 0x06; ##< An Object Identifier.
const SNMP::OBJ_IPADDRESS_TAG : count = 0x40; ##< An IP address.
const SNMP::OBJ_COUNTER32_TAG : count = 0x41; ##< Unsigned 32-bit integer.
const SNMP::OBJ_UNSIGNED32_TAG : count = 0x42; ##< Unsigned 32-bit integer.
const SNMP::OBJ_TIMETICKS_TAG : count = 0x43; ##< Unsigned 32-bit integer.
const SNMP::OBJ_OPAQUE_TAG : count = 0x44; ##< An octet string.
const SNMP::OBJ_COUNTER64_TAG : count = 0x46; ##< Unsigned 64-bit integer.
const SNMP::OBJ_NOSUCHOBJECT_TAG : count = 0x80; ##< A NULL value.
const SNMP::OBJ_NOSUCHINSTANCE_TAG: count = 0x81; ##< A NULL value.
const SNMP::OBJ_ENDOFMIBVIEW_TAG : count = 0x82; ##< A NULL value.
## The ``VarBind`` data structure from either :rfc:`1157` or
## :rfc:`3416`, which maps an Object Identifier to a value.
type SNMP::Binding: record {
oid: string;
value: SNMP::ObjectValue;
};
## A ``VarBindList`` data structure from either :rfc:`1157` or :rfc:`3416`.
## A sequence of :bro:see:`SNMP::Binding` records, each mapping an OID to a value.
type SNMP::Bindings: vector of SNMP::Binding;
## A ``PDU`` data structure from either :rfc:`1157` or :rfc:`3416`.
type SNMP::PDU: record {
request_id: int;
error_status: int;
error_index: int;
bindings: SNMP::Bindings;
};
## A ``Trap-PDU`` data structure from :rfc:`1157`.
type SNMP::TrapPDU: record {
enterprise: string;
agent: addr;
generic_trap: int;
specific_trap: int;
time_stamp: count;
bindings: SNMP::Bindings;
};
## A ``BulkPDU`` data structure from :rfc:`3416`.
type SNMP::BulkPDU: record {
request_id: int;
non_repeaters: count;
max_repititions: count;
bindings: SNMP::Bindings;
};
}
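To make these structures concrete, a short sketch that walks the variable bindings of incoming GET requests; it assumes the SNMP analyzer's ``snmp_get_request`` event carries the header and PDU types shown above::

    @load base/protocols/snmp

    event snmp_get_request(c: connection, is_orig: bool,
                           header: SNMP::Header, pdu: SNMP::PDU)
        {
        # Each binding pairs an OID with an ObjectValue; for GET requests
        # the value is typically a NULL placeholder, so just show the OID
        # being queried and the value's tag.
        for ( i in pdu$bindings )
            print c$id$orig_h, pdu$bindings[i]$oid, pdu$bindings[i]$value$tag;
        }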
module GLOBAL; module GLOBAL;
@load base/bif/event.bif @load base/bif/event.bif
@ -3159,9 +3363,6 @@ const global_hash_seed: string = "" &redef;
## The maximum is currently 128 bits. ## The maximum is currently 128 bits.
const bits_per_uid: count = 96 &redef; const bits_per_uid: count = 96 &redef;
# Load BiFs defined by plugins.
@load base/bif/plugins
# Load these frameworks here because they use fairly deep integration with # Load these frameworks here because they use fairly deep integration with
# BiFs and script-land defined types. # BiFs and script-land defined types.
@load base/frameworks/logging @load base/frameworks/logging
@ -3170,3 +3371,7 @@ const bits_per_uid: count = 96 &redef;
@load base/frameworks/files @load base/frameworks/files
@load base/bif @load base/bif
# Load BiFs defined by plugins.
@load base/bif/plugins

View file

@ -46,7 +46,10 @@
@load base/protocols/http @load base/protocols/http
@load base/protocols/irc @load base/protocols/irc
@load base/protocols/modbus @load base/protocols/modbus
@load base/protocols/mysql
@load base/protocols/pop3 @load base/protocols/pop3
@load base/protocols/radius
@load base/protocols/snmp
@load base/protocols/smtp @load base/protocols/smtp
@load base/protocols/socks @load base/protocols/socks
@load base/protocols/ssh @load base/protocols/ssh
@ -57,7 +60,7 @@
@load base/files/hash @load base/files/hash
@load base/files/extract @load base/files/extract
@load base/files/unified2 @load base/files/unified2
@load base/files/x509
@load base/misc/find-checksum-offloading @load base/misc/find-checksum-offloading
@load base/misc/find-filtered-trace @load base/misc/find-filtered-trace

View file

@ -47,13 +47,13 @@ redef record connection += {
const ports = { 67/udp, 68/udp }; const ports = { 67/udp, 68/udp };
redef likely_server_ports += { 67/udp }; redef likely_server_ports += { 67/udp };
event bro_init() event bro_init() &priority=5
{ {
Log::create_stream(DHCP::LOG, [$columns=Info, $ev=log_dhcp]); Log::create_stream(DHCP::LOG, [$columns=Info, $ev=log_dhcp]);
Analyzer::register_for_ports(Analyzer::ANALYZER_DHCP, ports); Analyzer::register_for_ports(Analyzer::ANALYZER_DHCP, ports);
} }
event dhcp_ack(c: connection, msg: dhcp_msg, mask: addr, router: dhcp_router_list, lease: interval, serv_addr: addr, host_name: string) event dhcp_ack(c: connection, msg: dhcp_msg, mask: addr, router: dhcp_router_list, lease: interval, serv_addr: addr, host_name: string) &priority=5
{ {
local info: Info; local info: Info;
info$ts = network_time(); info$ts = network_time();
@ -71,6 +71,9 @@ event dhcp_ack(c: connection, msg: dhcp_msg, mask: addr, router: dhcp_router_lis
info$assigned_ip = c$id$orig_h; info$assigned_ip = c$id$orig_h;
c$dhcp = info; c$dhcp = info;
}
event dhcp_ack(c: connection, msg: dhcp_msg, mask: addr, router: dhcp_router_list, lease: interval, serv_addr: addr, host_name: string) &priority=-5
{
Log::write(DHCP::LOG, c$dhcp); Log::write(DHCP::LOG, c$dhcp);
} }
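Aside: the dhcp_ack handler is now split into a &priority=5 body that populates c$dhcp and a &priority=-5 body that writes the log, the usual Bro idiom for giving other scripts a window to adjust the record before it is logged. A minimal, hypothetical in-between handler (default priority 0 runs after the first body and before the log write):

event dhcp_ack(c: connection, msg: dhcp_msg, mask: addr, router: dhcp_router_list, lease: interval, serv_addr: addr, host_name: string)
	{
	# The &priority=5 handler above has already populated c$dhcp here.
	if ( c?$dhcp )
		print fmt("DHCP lease of %s assigned to %s", lease, c$dhcp$assigned_ip);
	}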

View file

@ -183,7 +183,7 @@ function log_unmatched_msgs(msgs: PendingMessages)
for ( trans_id in msgs ) for ( trans_id in msgs )
log_unmatched_msgs_queue(msgs[trans_id]); log_unmatched_msgs_queue(msgs[trans_id]);
msgs = PendingMessages(); clear_table(msgs);
} }
function enqueue_new_msg(msgs: PendingMessages, id: count, msg: Info) function enqueue_new_msg(msgs: PendingMessages, id: count, msg: Info)
@ -382,9 +382,19 @@ event dns_A_reply(c: connection, msg: dns_msg, ans: dns_answer, a: addr) &priori
hook DNS::do_reply(c, msg, ans, fmt("%s", a)); hook DNS::do_reply(c, msg, ans, fmt("%s", a));
} }
event dns_TXT_reply(c: connection, msg: dns_msg, ans: dns_answer, str: string) &priority=5 event dns_TXT_reply(c: connection, msg: dns_msg, ans: dns_answer, strs: string_vec) &priority=5
{ {
hook DNS::do_reply(c, msg, ans, str); local txt_strings: string = "";
for ( i in strs )
{
if ( i > 0 )
txt_strings += " ";
txt_strings += fmt("TXT %d %s", |strs[i]|, strs[i]);
}
hook DNS::do_reply(c, msg, ans, txt_strings);
} }
event dns_AAAA_reply(c: connection, msg: dns_msg, ans: dns_answer, a: addr) &priority=5 event dns_AAAA_reply(c: connection, msg: dns_msg, ans: dns_answer, a: addr) &priority=5
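Aside: the dns_TXT_reply signature change means the event now delivers every character-string of a TXT record as a string_vec, which the handler folds back into a single answer string. A small, self-contained sketch of that folding with made-up input values:

event bro_init()
	{
	# Hypothetical TXT character-strings; mirrors the joining loop above.
	local strs = vector("v=spf1", "include:example.org");
	local txt_strings = "";
	for ( i in strs )
		{
		if ( i > 0 )
			txt_strings += " ";
		txt_strings += fmt("TXT %d %s", |strs[i]|, strs[i]);
		}
	print txt_strings;  # TXT 6 v=spf1 TXT 19 include:example.org
	}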

View file

@ -72,7 +72,7 @@ event file_over_new_connection(f: fa_file, c: connection, is_orig: bool) &priori
if ( f$is_orig ) if ( f$is_orig )
{ {
if ( ! c$http?$orig_mime_types ) if ( ! c$http?$orig_fuids )
c$http$orig_fuids = string_vec(f$id); c$http$orig_fuids = string_vec(f$id);
else else
c$http$orig_fuids[|c$http$orig_fuids|] = f$id; c$http$orig_fuids[|c$http$orig_fuids|] = f$id;
@ -87,7 +87,7 @@ event file_over_new_connection(f: fa_file, c: connection, is_orig: bool) &priori
} }
else else
{ {
if ( ! c$http?$resp_mime_types ) if ( ! c$http?$resp_fuids )
c$http$resp_fuids = string_vec(f$id); c$http$resp_fuids = string_vec(f$id);
else else
c$http$resp_fuids[|c$http$resp_fuids|] = f$id; c$http$resp_fuids[|c$http$resp_fuids|] = f$id;

View file

@ -76,7 +76,7 @@ event irc_dcc_message(c: connection, is_orig: bool,
dcc_expected_transfers[address, p] = c$irc; dcc_expected_transfers[address, p] = c$irc;
} }
event expected_connection_seen(c: connection, a: Analyzer::Tag) &priority=10 event scheduled_analyzer_applied(c: connection, a: Analyzer::Tag) &priority=10
{ {
local id = c$id; local id = c$id;
if ( [id$resp_h, id$resp_p] in dcc_expected_transfers ) if ( [id$resp_h, id$resp_p] in dcc_expected_transfers )

View file

@ -0,0 +1 @@
@load ./main

View file

@ -0,0 +1,38 @@
module MySQL;
export {
const commands: table[count] of string = {
[0] = "sleep",
[1] = "quit",
[2] = "init_db",
[3] = "query",
[4] = "field_list",
[5] = "create_db",
[6] = "drop_db",
[7] = "refresh",
[8] = "shutdown",
[9] = "statistics",
[10] = "process_info",
[11] = "connect",
[12] = "process_kill",
[13] = "debug",
[14] = "ping",
[15] = "time",
[16] = "delayed_insert",
[17] = "change_user",
[18] = "binlog_dump",
[19] = "table_dump",
[20] = "connect_out",
[21] = "register_slave",
[22] = "stmt_prepare",
[23] = "stmt_execute",
[24] = "stmt_send_long_data",
[25] = "stmt_close",
[26] = "stmt_reset",
[27] = "set_option",
[28] = "stmt_fetch",
[29] = "daemon",
[30] = "binlog_dump_gtid",
[31] = "reset_connection",
} &default=function(i: count): string { return fmt("unknown-%d", i); };
}
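Aside: because the commands table carries a &default function, looking up an unrecognized command code never triggers a missing-index error; it just yields a synthesized name. A short, hypothetical usage sketch (assuming this consts script is loaded):

event bro_init()
	{
	print MySQL::commands[3];    # "query"
	print MySQL::commands[200];  # falls back to the &default: "unknown-200"
	}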

View file

@ -0,0 +1,116 @@
##! Implements base functionality for MySQL analysis. Generates the mysql.log file.
module MySQL;
@load ./consts
export {
redef enum Log::ID += { mysql::LOG };
type Info: record {
## Timestamp for when the event happened.
ts: time &log;
## Unique ID for the connection.
uid: string &log;
## The connection's 4-tuple of endpoint addresses/ports.
id: conn_id &log;
## The command that was issued.
cmd: string &log;
## The argument issued to the command.
arg: string &log;
## The result (error, OK, etc.) from the server.
result: string &log &optional;
## Server message, if any.
response: string &log &optional;
};
## Event that can be handled to access the MySQL record as it is sent on
## to the logging framework.
global log_mysql: event(rec: Info);
}
redef record connection += {
mysql: Info &optional;
};
const ports = { 1434/tcp, 3306/tcp };
event bro_init() &priority=5
{
Log::create_stream(mysql::LOG, [$columns=Info, $ev=log_mysql]);
Analyzer::register_for_ports(Analyzer::ANALYZER_MYSQL, ports);
}
event mysql_handshake(c: connection, username: string)
{
if ( ! c?$mysql )
{
local info: Info;
info$ts = network_time();
info$uid = c$uid;
info$id = c$id;
info$cmd = "login";
info$arg = username;
c$mysql = info;
}
}
event mysql_command_request(c: connection, command: count, arg: string) &priority=5
{
if ( ! c?$mysql )
{
local info: Info;
info$ts = network_time();
info$uid = c$uid;
info$id = c$id;
info$cmd = commands[command];
info$arg = sub(arg, /\0$/, "");
c$mysql = info;
}
}
event mysql_command_request(c: connection, command: count, arg: string) &priority=-5
{
if ( c?$mysql && c$mysql?$cmd && c$mysql$cmd == "quit" )
{
# We get no response for quits, so let's just log it now.
Log::write(mysql::LOG, c$mysql);
delete c$mysql;
}
}
event mysql_error(c: connection, code: count, msg: string) &priority=5
{
if ( c?$mysql )
{
c$mysql$result = "error";
c$mysql$response = msg;
}
}
event mysql_error(c: connection, code: count, msg: string) &priority=-5
{
if ( c?$mysql )
{
Log::write(mysql::LOG, c$mysql);
delete c$mysql;
}
}
event mysql_ok(c: connection, affected_rows: count) &priority=5
{
if ( c?$mysql )
{
c$mysql$result = "ok";
c$mysql$response = fmt("Affected rows: %d", affected_rows);
}
}
event mysql_ok(c: connection, affected_rows: count) &priority=-5
{
if ( c?$mysql )
{
Log::write(mysql::LOG, c$mysql);
delete c$mysql;
}
}
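Aside: each request/response pair above ends with a Log::write to mysql::LOG, so other scripts can observe completed entries purely through the exported log_mysql event. A hedged consumer sketch (the filtering is just an example):

event MySQL::log_mysql(rec: MySQL::Info)
	{
	# React only to commands that the server answered with an error.
	if ( rec?$result && rec$result == "error" && rec?$response )
		print fmt("MySQL error on %s: %s", rec$uid, rec$response);
	}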

View file

@ -0,0 +1 @@
@load ./main

View file

@ -0,0 +1,231 @@
module RADIUS;
const msg_types: table[count] of string = {
[1] = "Access-Request",
[2] = "Access-Accept",
[3] = "Access-Reject",
[4] = "Accounting-Request",
[5] = "Accounting-Response",
[11] = "Access-Challenge",
[12] = "Status-Server",
[13] = "Status-Client",
} &default=function(i: count): string { return fmt("unknown-%d", i); };
const attr_types: table[count] of string = {
[1] = "User-Name",
[2] = "User-Password",
[3] = "CHAP-Password",
[4] = "NAS-IP-Address",
[5] = "NAS-Port",
[6] = "Service-Type",
[7] = "Framed-Protocol",
[8] = "Framed-IP-Address",
[9] = "Framed-IP-Netmask",
[10] = "Framed-Routing",
[11] = "Filter-Id",
[12] = "Framed-MTU",
[13] = "Framed-Compression",
[14] = "Login-IP-Host",
[15] = "Login-Service",
[16] = "Login-TCP-Port",
[18] = "Reply-Message",
[19] = "Callback-Number",
[20] = "Callback-Id",
[22] = "Framed-Route",
[23] = "Framed-IPX-Network",
[24] = "State",
[25] = "Class",
[26] = "Vendor-Specific",
[27] = "Session-Timeout",
[28] = "Idle-Timeout",
[29] = "Termination-Action",
[30] = "Called-Station-Id",
[31] = "Calling-Station-Id",
[32] = "NAS-Identifier",
[33] = "Proxy-State",
[34] = "Login-LAT-Service",
[35] = "Login-LAT-Node",
[36] = "Login-LAT-Group",
[37] = "Framed-AppleTalk-Link",
[38] = "Framed-AppleTalk-Network",
[39] = "Framed-AppleTalk-Zone",
[40] = "Acct-Status-Type",
[41] = "Acct-Delay-Time",
[42] = "Acct-Input-Octets",
[43] = "Acct-Output-Octets",
[44] = "Acct-Session-Id",
[45] = "Acct-Authentic",
[46] = "Acct-Session-Time",
[47] = "Acct-Input-Packets",
[48] = "Acct-Output-Packets",
[49] = "Acct-Terminate-Cause",
[50] = "Acct-Multi-Session-Id",
[51] = "Acct-Link-Count",
[52] = "Acct-Input-Gigawords",
[53] = "Acct-Output-Gigawords",
[55] = "Event-Timestamp",
[56] = "Egress-VLANID",
[57] = "Ingress-Filters",
[58] = "Egress-VLAN-Name",
[59] = "User-Priority-Table",
[60] = "CHAP-Challenge",
[61] = "NAS-Port-Type",
[62] = "Port-Limit",
[63] = "Login-LAT-Port",
[64] = "Tunnel-Type",
[65] = "Tunnel-Medium-Type",
[66] = "Tunnel-Client-EndPoint",
[67] = "Tunnel-Server-EndPoint",
[68] = "Acct-Tunnel-Connection",
[69] = "Tunnel-Password",
[70] = "ARAP-Password",
[71] = "ARAP-Features",
[72] = "ARAP-Zone-Access",
[73] = "ARAP-Security",
[74] = "ARAP-Security-Data",
[75] = "Password-Retry",
[76] = "Prompt",
[77] = "Connect-Info",
[78] = "Configuration-Token",
[79] = "EAP-Message",
[80] = "Message Authenticator",
[81] = "Tunnel-Private-Group-ID",
[82] = "Tunnel-Assignment-ID",
[83] = "Tunnel-Preference",
[84] = "ARAP-Challenge-Response",
[85] = "Acct-Interim-Interval",
[86] = "Acct-Tunnel-Packets-Lost",
[87] = "NAS-Port-Id",
[88] = "Framed-Pool",
[89] = "CUI",
[90] = "Tunnel-Client-Auth-ID",
[91] = "Tunnel-Server-Auth-ID",
[92] = "NAS-Filter-Rule",
[94] = "Originating-Line-Info",
[95] = "NAS-IPv6-Address",
[96] = "Framed-Interface-Id",
[97] = "Framed-IPv6-Prefix",
[98] = "Login-IPv6-Host",
[99] = "Framed-IPv6-Route",
[100] = "Framed-IPv6-Pool",
[101] = "Error-Cause",
[102] = "EAP-Key-Name",
[103] = "Digest-Response",
[104] = "Digest-Realm",
[105] = "Digest-Nonce",
[106] = "Digest-Response-Auth",
[107] = "Digest-Nextnonce",
[108] = "Digest-Method",
[109] = "Digest-URI",
[110] = "Digest-Qop",
[111] = "Digest-Algorithm",
[112] = "Digest-Entity-Body-Hash",
[113] = "Digest-CNonce",
[114] = "Digest-Nonce-Count",
[115] = "Digest-Username",
[116] = "Digest-Opaque",
[117] = "Digest-Auth-Param",
[118] = "Digest-AKA-Auts",
[119] = "Digest-Domain",
[120] = "Digest-Stale",
[121] = "Digest-HA1",
[122] = "SIP-AOR",
[123] = "Delegated-IPv6-Prefix",
[124] = "MIP6-Feature-Vector",
[125] = "MIP6-Home-Link-Prefix",
[126] = "Operator-Name",
[127] = "Location-Information",
[128] = "Location-Data",
[129] = "Basic-Location-Policy-Rules",
[130] = "Extended-Location-Policy-Rules",
[131] = "Location-Capable",
[132] = "Requested-Location-Info",
[133] = "Framed-Management-Protocol",
[134] = "Management-Transport-Protection",
[135] = "Management-Policy-Id",
[136] = "Management-Privilege-Level",
[137] = "PKM-SS-Cert",
[138] = "PKM-CA-Cert",
[139] = "PKM-Config-Settings",
[140] = "PKM-Cryptosuite-List",
[141] = "PKM-SAID",
[142] = "PKM-SA-Descriptor",
[143] = "PKM-Auth-Key",
[144] = "DS-Lite-Tunnel-Name",
[145] = "Mobile-Node-Identifier",
[146] = "Service-Selection",
[147] = "PMIP6-Home-LMA-IPv6-Address",
[148] = "PMIP6-Visited-LMA-IPv6-Address",
[149] = "PMIP6-Home-LMA-IPv4-Address",
[150] = "PMIP6-Visited-LMA-IPv4-Address",
[151] = "PMIP6-Home-HN-Prefix",
[152] = "PMIP6-Visited-HN-Prefix",
[153] = "PMIP6-Home-Interface-ID",
[154] = "PMIP6-Visited-Interface-ID",
[155] = "PMIP6-Home-IPv4-HoA",
[156] = "PMIP6-Visited-IPv4-HoA",
[157] = "PMIP6-Home-DHCP4-Server-Address",
[158] = "PMIP6-Visited-DHCP4-Server-Address",
[159] = "PMIP6-Home-DHCP6-Server-Address",
[160] = "PMIP6-Visited-DHCP6-Server-Address",
[161] = "PMIP6-Home-IPv4-Gateway",
[162] = "PMIP6-Visited-IPv4-Gateway",
[163] = "EAP-Lower-Layer",
[164] = "GSS-Acceptor-Service-Name",
[165] = "GSS-Acceptor-Host-Name",
[166] = "GSS-Acceptor-Service-Specifics",
[167] = "GSS-Acceptor-Realm-Name",
[168] = "Framed-IPv6-Address",
[169] = "DNS-Server-IPv6-Address",
[170] = "Route-IPv6-Information",
[171] = "Delegated-IPv6-Prefix-Pool",
[172] = "Stateful-IPv6-Address-Pool",
[173] = "IPv6-6rd-Configuration"
} &default=function(i: count): string { return fmt("unknown-%d", i); };
const nas_port_types: table[count] of string = {
[0] = "Async",
[1] = "Sync",
[2] = "ISDN Sync",
[3] = "ISDN Async V.120",
[4] = "ISDN Async V.110",
[5] = "Virtual",
[6] = "PIAFS",
[7] = "HDLC Clear Channel",
[8] = "X.25",
[9] = "X.75",
[10] = "G.3 Fax",
[11] = "SDSL - Symmetric DSL",
[12] = "ADSL-CAP - Asymmetric DSL, Carrierless Amplitude Phase Modulation",
[13] = "ADSL-DMT - Asymmetric DSL, Discrete Multi-Tone",
[14] = "IDSL - ISDN Digital Subscriber Line",
[15] = "Ethernet",
[16] = "xDSL - Digital Subscriber Line of unknown type",
[17] = "Cable",
[18] = "Wireless - Other",
[19] = "Wireless - IEEE 802.11"
} &default=function(i: count): string { return fmt("unknown-%d", i); };
const service_types: table[count] of string = {
[1] = "Login",
[2] = "Framed",
[3] = "Callback Login",
[4] = "Callback Framed",
[5] = "Outbound",
[6] = "Administrative",
[7] = "NAS Prompt",
[8] = "Authenticate Only",
[9] = "Callback NAS Prompt",
[10] = "Call Check",
[11] = "Callback Administrative",
} &default=function(i: count): string { return fmt("unknown-%d", i); };
const framed_protocol_types: table[count] of string = {
[1] = "PPP",
[2] = "SLIP",
[3] = "AppleTalk Remote Access Protocol (ARAP)",
[4] = "Gandalf proprietary SingleLink/MultiLink protocol",
[5] = "Xylogics proprietary IPX/SLIP",
[6] = "X.75 Synchronous"
} &default=function(i: count): string { return fmt("unknown-%d", i); };

View file

@ -0,0 +1,126 @@
##! Implements base functionality for RADIUS analysis. Generates the radius.log file.
module RADIUS;
@load ./consts.bro
@load base/utils/addrs
export {
redef enum Log::ID += { LOG };
type Info: record {
## Timestamp for when the event happened.
ts : time &log;
## Unique ID for the connection.
uid : string &log;
## The connection's 4-tuple of endpoint addresses/ports.
id : conn_id &log;
## The username, if present.
username : string &log &optional;
## MAC address, if present.
mac : string &log &optional;
## Remote IP address, if present.
remote_ip : addr &log &optional;
## Connect info, if present.
connect_info : string &log &optional;
## Successful or failed authentication.
result : string &log &optional;
## Whether this has already been logged and can be ignored.
logged : bool &optional;
};
## The amount of time we wait for an authentication response before
## expiring it.
const expiration_interval = 10secs &redef;
## Logs an authentication attempt if we didn't see a response in time.
##
## t: A table of Info records.
##
## idx: The index of the connection$radius table corresponding to the
## radius authentication about to expire.
##
## Returns: 0secs, which when this function is used as an
## :bro:attr:`&expire_func`, indicates to remove the element at
## *idx* immediately.
global expire: function(t: table[count] of Info, idx: count): interval;
## Event that can be handled to access the RADIUS record as it is sent on
## to the logging framework.
global log_radius: event(rec: Info);
}
redef record connection += {
radius: table[count] of Info &optional &write_expire=expiration_interval &expire_func=expire;
};
const ports = { 1812/udp };
event bro_init() &priority=5
{
Log::create_stream(RADIUS::LOG, [$columns=Info, $ev=log_radius]);
Analyzer::register_for_ports(Analyzer::ANALYZER_RADIUS, ports);
}
event radius_message(c: connection, result: RADIUS::Message)
{
local info: Info;
if ( c?$radius && result$trans_id in c$radius )
info = c$radius[result$trans_id];
else
{
c$radius = table();
info$ts = network_time();
info$uid = c$uid;
info$id = c$id;
}
switch ( RADIUS::msg_types[result$code] ) {
case "Access-Request":
if ( result?$attributes ) {
# User-Name
if ( ! info?$username && 1 in result$attributes )
info$username = result$attributes[1][0];
# Calling-Station-Id (we expect this to be a MAC)
if ( ! info?$mac && 31 in result$attributes )
info$mac = normalize_mac(result$attributes[31][0]);
# Tunnel-Client-EndPoint (useful for VPNs)
if ( ! info?$remote_ip && 66 in result$attributes )
info$remote_ip = to_addr(result$attributes[66][0]);
# Connect-Info
if ( ! info?$connect_info && 77 in result$attributes )
info$connect_info = result$attributes[77][0];
}
break;
case "Access-Accept":
info$result = "success";
break;
case "Access-Reject":
info$result = "failed";
break;
}
if ( info?$result && ! info?$logged )
{
info$logged = T;
Log::write(RADIUS::LOG, info);
}
c$radius[result$trans_id] = info;
}
function expire(t: table[count] of Info, idx: count): interval
{
t[idx]$result = "unknown";
Log::write(RADIUS::LOG, t[idx]);
return 0secs;
}
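Aside: the per-connection radius table relies on &write_expire together with the &expire_func above, so an Access-Request that never receives a reply is still logged with result "unknown" once expiration_interval passes. The same attribute pairing on a standalone table looks like this (names and the 10-second window are hypothetical):

function log_and_drop(t: table[count] of string, idx: count): interval
	{
	# Like RADIUS::expire above: log the stale entry and return 0secs so
	# the element is removed immediately.
	print fmt("request %d expired without a reply: %s", idx, t[idx]);
	return 0secs;
	}

global pending_requests: table[count] of string
	&write_expire=10secs &expire_func=log_and_drop;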

View file

@ -41,13 +41,13 @@ function describe_file(f: fa_file): string
event bro_init() &priority=5 event bro_init() &priority=5
{ {
Files::register_protocol(Analyzer::ANALYZER_SMTP, Files::register_protocol(Analyzer::ANALYZER_SMTP,
[$get_file_handle = SMTP::get_file_handle, [$get_file_handle = SMTP::get_file_handle,
$describe = SMTP::describe_file]); $describe = SMTP::describe_file]);
} }
event file_over_new_connection(f: fa_file, c: connection, is_orig: bool) &priority=5 event file_over_new_connection(f: fa_file, c: connection, is_orig: bool) &priority=5
{ {
if ( c?$smtp ) if ( c?$smtp && !c$smtp$tls )
c$smtp$fuids[|c$smtp$fuids|] = f$id; c$smtp$fuids[|c$smtp$fuids|] = f$id;
} }

View file

@ -49,38 +49,40 @@ export {
path: vector of addr &log &optional; path: vector of addr &log &optional;
## Value of the User-Agent header from the client. ## Value of the User-Agent header from the client.
user_agent: string &log &optional; user_agent: string &log &optional;
## Indicates that the connection has switched to using TLS.
tls: bool &log &default=F;
## Indicates if the "Received: from" headers should still be ## Indicates if the "Received: from" headers should still be
## processed. ## processed.
process_received_from: bool &default=T; process_received_from: bool &default=T;
## Indicates if client activity has been seen, but not yet logged. ## Indicates if client activity has been seen, but not yet logged.
has_client_activity: bool &default=F; has_client_activity: bool &default=F;
}; };
type State: record { type State: record {
helo: string &optional; helo: string &optional;
## Count the number of individual messages transmitted during ## Count the number of individual messages transmitted during
## this SMTP session. Note, this is not the number of ## this SMTP session. Note, this is not the number of
## recipients, but the number of message bodies transferred. ## recipients, but the number of message bodies transferred.
messages_transferred: count &default=0; messages_transferred: count &default=0;
pending_messages: set[Info] &optional; pending_messages: set[Info] &optional;
}; };
## Direction to capture the full "Received from" path. ## Direction to capture the full "Received from" path.
## REMOTE_HOSTS - only capture the path until an internal host is found. ## REMOTE_HOSTS - only capture the path until an internal host is found.
## LOCAL_HOSTS - only capture the path until the external host is discovered. ## LOCAL_HOSTS - only capture the path until the external host is discovered.
## ALL_HOSTS - always capture the entire path. ## ALL_HOSTS - always capture the entire path.
## NO_HOSTS - never capture the path. ## NO_HOSTS - never capture the path.
const mail_path_capture = ALL_HOSTS &redef; const mail_path_capture = ALL_HOSTS &redef;
## Create an extremely shortened representation of a log line. ## Create an extremely shortened representation of a log line.
global describe: function(rec: Info): string; global describe: function(rec: Info): string;
global log_smtp: event(rec: Info); global log_smtp: event(rec: Info);
} }
redef record connection += { redef record connection += {
smtp: Info &optional; smtp: Info &optional;
smtp_state: State &optional; smtp_state: State &optional;
}; };
@ -93,7 +95,7 @@ event bro_init() &priority=5
Log::create_stream(SMTP::LOG, [$columns=SMTP::Info, $ev=log_smtp]); Log::create_stream(SMTP::LOG, [$columns=SMTP::Info, $ev=log_smtp]);
Analyzer::register_for_ports(Analyzer::ANALYZER_SMTP, ports); Analyzer::register_for_ports(Analyzer::ANALYZER_SMTP, ports);
} }
function find_address_in_smtp_header(header: string): string function find_address_in_smtp_header(header: string): string
{ {
local ips = find_ip_addresses(header); local ips = find_ip_addresses(header);
@ -114,17 +116,17 @@ function new_smtp_log(c: connection): Info
l$ts=network_time(); l$ts=network_time();
l$uid=c$uid; l$uid=c$uid;
l$id=c$id; l$id=c$id;
# The messages_transferred count isn't incremented until the message is # The messages_transferred count isn't incremented until the message is
# finished so we need to increment the count by 1 here. # finished so we need to increment the count by 1 here.
l$trans_depth = c$smtp_state$messages_transferred+1; l$trans_depth = c$smtp_state$messages_transferred+1;
if ( c$smtp_state?$helo ) if ( c$smtp_state?$helo )
l$helo = c$smtp_state$helo; l$helo = c$smtp_state$helo;
# The path will always end with the hosts involved in this connection. # The path will always end with the hosts involved in this connection.
# The lower values in the vector are the end of the path. # The lower values in the vector are the end of the path.
l$path = vector(c$id$resp_h, c$id$orig_h); l$path = vector(c$id$resp_h, c$id$orig_h);
return l; return l;
} }
@ -132,7 +134,7 @@ function set_smtp_session(c: connection)
{ {
if ( ! c?$smtp_state ) if ( ! c?$smtp_state )
c$smtp_state = []; c$smtp_state = [];
if ( ! c?$smtp ) if ( ! c?$smtp )
c$smtp = new_smtp_log(c); c$smtp = new_smtp_log(c);
} }
@ -140,17 +142,17 @@ function set_smtp_session(c: connection)
function smtp_message(c: connection) function smtp_message(c: connection)
{ {
if ( c$smtp$has_client_activity ) if ( c$smtp$has_client_activity )
{
Log::write(SMTP::LOG, c$smtp); Log::write(SMTP::LOG, c$smtp);
c$smtp = new_smtp_log(c);
}
} }
event smtp_request(c: connection, is_orig: bool, command: string, arg: string) &priority=5 event smtp_request(c: connection, is_orig: bool, command: string, arg: string) &priority=5
{ {
set_smtp_session(c); set_smtp_session(c);
local upper_command = to_upper(command); local upper_command = to_upper(command);
if ( upper_command != "QUIT" )
c$smtp$has_client_activity = T;
if ( upper_command == "HELO" || upper_command == "EHLO" ) if ( upper_command == "HELO" || upper_command == "EHLO" )
{ {
c$smtp_state$helo = arg; c$smtp_state$helo = arg;
@ -159,23 +161,28 @@ event smtp_request(c: connection, is_orig: bool, command: string, arg: string) &
else if ( upper_command == "RCPT" && /^[tT][oO]:/ in arg ) else if ( upper_command == "RCPT" && /^[tT][oO]:/ in arg )
{ {
if ( ! c$smtp?$rcptto ) if ( ! c$smtp?$rcptto )
c$smtp$rcptto = set(); c$smtp$rcptto = set();
add c$smtp$rcptto[split1(arg, /:[[:blank:]]*/)[2]]; add c$smtp$rcptto[split1(arg, /:[[:blank:]]*/)[2]];
c$smtp$has_client_activity = T;
} }
else if ( upper_command == "MAIL" && /^[fF][rR][oO][mM]:/ in arg ) else if ( upper_command == "MAIL" && /^[fF][rR][oO][mM]:/ in arg )
{ {
# Flush last message in case we didn't see the server's acknowledgement.
smtp_message(c);
local partially_done = split1(arg, /:[[:blank:]]*/)[2]; local partially_done = split1(arg, /:[[:blank:]]*/)[2];
c$smtp$mailfrom = split1(partially_done, /[[:blank:]]?/)[1]; c$smtp$mailfrom = split1(partially_done, /[[:blank:]]?/)[1];
c$smtp$has_client_activity = T;
} }
} }
event smtp_reply(c: connection, is_orig: bool, code: count, cmd: string, event smtp_reply(c: connection, is_orig: bool, code: count, cmd: string,
msg: string, cont_resp: bool) &priority=5 msg: string, cont_resp: bool) &priority=5
{ {
set_smtp_session(c); set_smtp_session(c);
# This continually overwrites, but we want the last reply, # This continually overwrites, but we want the last reply,
# so this actually works fine. # so this actually works fine.
c$smtp$last_reply = fmt("%d %s", code, msg); c$smtp$last_reply = fmt("%d %s", code, msg);
@ -196,7 +203,6 @@ event smtp_reply(c: connection, is_orig: bool, code: count, cmd: string,
event mime_one_header(c: connection, h: mime_header_rec) &priority=5 event mime_one_header(c: connection, h: mime_header_rec) &priority=5
{ {
if ( ! c?$smtp ) return; if ( ! c?$smtp ) return;
c$smtp$has_client_activity = T;
if ( h$name == "MESSAGE-ID" ) if ( h$name == "MESSAGE-ID" )
c$smtp$msg_id = h$value; c$smtp$msg_id = h$value;
@ -239,19 +245,19 @@ event mime_one_header(c: connection, h: mime_header_rec) &priority=5
if ( 1 in addresses ) if ( 1 in addresses )
c$smtp$x_originating_ip = to_addr(addresses[1]); c$smtp$x_originating_ip = to_addr(addresses[1]);
} }
else if ( h$name == "X-MAILER" || else if ( h$name == "X-MAILER" ||
h$name == "USER-AGENT" || h$name == "USER-AGENT" ||
h$name == "X-USER-AGENT" ) h$name == "X-USER-AGENT" )
c$smtp$user_agent = h$value; c$smtp$user_agent = h$value;
} }
# This event handler builds the "Received From" path by reading the # This event handler builds the "Received From" path by reading the
# headers in the mail # headers in the mail
event mime_one_header(c: connection, h: mime_header_rec) &priority=3 event mime_one_header(c: connection, h: mime_header_rec) &priority=3
{ {
# If we've decided that we're done watching the received headers for # If we've decided that we're done watching the received headers for
# whatever reason, we're done. Could be due to only watching until # whatever reason, we're done. Could be due to only watching until
# local addresses are seen in the received from headers. # local addresses are seen in the received from headers.
if ( ! c?$smtp || h$name != "RECEIVED" || ! c$smtp$process_received_from ) if ( ! c?$smtp || h$name != "RECEIVED" || ! c$smtp$process_received_from )
return; return;
@ -261,7 +267,7 @@ event mime_one_header(c: connection, h: mime_header_rec) &priority=3
return; return;
local ip = to_addr(text_ip); local ip = to_addr(text_ip);
if ( ! addr_matches_host(ip, mail_path_capture) && if ( ! addr_matches_host(ip, mail_path_capture) &&
! Site::is_private_addr(ip) ) ! Site::is_private_addr(ip) )
{ {
c$smtp$process_received_from = F; c$smtp$process_received_from = F;
@ -276,6 +282,15 @@ event connection_state_remove(c: connection) &priority=-5
smtp_message(c); smtp_message(c);
} }
event smtp_starttls(c: connection) &priority=5
{
if ( c?$smtp )
{
c$smtp$tls = T;
c$smtp$has_client_activity = T;
}
}
function describe(rec: Info): string function describe(rec: Info): string
{ {
if ( rec?$mailfrom && rec?$rcptto ) if ( rec?$mailfrom && rec?$rcptto )

View file

@ -0,0 +1 @@
Support for Simple Network Management Protocol (SNMP) analysis.

View file

@ -0,0 +1 @@
@load ./main

View file

@ -0,0 +1,182 @@
##! Enables analysis and logging of SNMP datagrams.
module SNMP;
export {
redef enum Log::ID += { LOG };
## Information tracked per SNMP session.
type Info: record {
## Timestamp of first packet belonging to the SNMP session.
ts: time &log;
## The unique ID for the connection.
uid: string &log;
## The connection's 5-tuple of addresses/ports (ports inherently
## include transport protocol information)
id: conn_id &log;
## The amount of time between the first packet belonging to
## the SNMP session and the latest one seen.
duration: interval &log &default=0secs;
## The version of SNMP being used.
version: string &log;
## The community string of the first SNMP packet associated with
## the session. This is used as part of SNMP's (v1 and v2c)
## administrative/security framework. See :rfc:`1157` or :rfc:`1901`.
community: string &log &optional;
## The number of variable bindings in GetRequest/GetNextRequest PDUs
## seen for the session.
get_requests: count &log &default=0;
## The number of variable bindings in GetBulkRequest PDUs seen for
## the session.
get_bulk_requests: count &log &default=0;
## The number of variable bindings in GetResponse/Response PDUs seen
## for the session.
get_responses: count &log &default=0;
## The number of variable bindings in SetRequest PDUs seen for
## the session.
set_requests: count &log &default=0;
## A system description of the SNMP responder endpoint.
display_string: string &log &optional;
## The time since which the SNMP responder endpoint claims it has
## been up.
up_since: time &log &optional;
};
## Maps an SNMP version integer to a human readable string.
const version_map: table[count] of string = {
[0] = "1",
[1] = "2c",
[3] = "3",
} &redef &default="unknown";
## Event that can be handled to access the SNMP record as it is sent on
## to the logging framework.
global log_snmp: event(rec: Info);
}
redef record connection += {
snmp: SNMP::Info &optional;
};
const ports = { 161/udp, 162/udp };
redef likely_server_ports += { ports };
event bro_init() &priority=5
{
Analyzer::register_for_ports(Analyzer::ANALYZER_SNMP, ports);
Log::create_stream(SNMP::LOG, [$columns=SNMP::Info, $ev=log_snmp]);
}
function init_state(c: connection, h: SNMP::Header): Info
{
if ( ! c?$snmp )
{
c$snmp = Info($ts=network_time(),
$uid=c$uid, $id=c$id,
$version=version_map[h$version]);
}
local s = c$snmp;
if ( ! s?$community )
{
if ( h?$v1 )
s$community = h$v1$community;
else if ( h?$v2 )
s$community = h$v2$community;
}
s$duration = network_time() - s$ts;
return s;
}
event connection_state_remove(c: connection) &priority=-5
{
if ( c?$snmp )
Log::write(LOG, c$snmp);
}
event snmp_get_request(c: connection, is_orig: bool, header: SNMP::Header, pdu: SNMP::PDU) &priority=5
{
local s = init_state(c, header);
s$get_requests += |pdu$bindings|;
}
event snmp_get_bulk_request(c: connection, is_orig: bool, header: SNMP::Header, pdu: SNMP::BulkPDU) &priority=5
{
local s = init_state(c, header);
s$get_bulk_requests += |pdu$bindings|;
}
event snmp_get_next_request(c: connection, is_orig: bool, header: SNMP::Header, pdu: SNMP::PDU) &priority=5
{
local s = init_state(c, header);
s$get_requests += |pdu$bindings|;
}
event snmp_response(c: connection, is_orig: bool, header: SNMP::Header, pdu: SNMP::PDU) &priority=5
{
local s = init_state(c, header);
s$get_responses += |pdu$bindings|;
for ( i in pdu$bindings )
{
local binding = pdu$bindings[i];
if ( binding$oid == "1.3.6.1.2.1.1.1.0" && binding$value?$octets )
c$snmp$display_string = binding$value$octets;
else if ( binding$oid == "1.3.6.1.2.1.1.3.0" && binding$value?$unsigned )
{
local up_seconds = binding$value$unsigned / 100.0;
s$up_since = network_time() - double_to_interval(up_seconds);
}
}
}
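Aside: the sysUpTime handling above turns SNMP TimeTicks, which count hundredths of a second, into an interval: dividing by 100.0 gives seconds, double_to_interval wraps them, and subtracting from network_time() gives the claimed boot time. A tiny worked sketch with a hypothetical raw value:

event bro_init()
	{
	local raw_timeticks = 8640000;             # hypothetical sysUpTime reading
	local up_seconds = raw_timeticks / 100.0;  # 86400.0 seconds, i.e. one day
	print double_to_interval(up_seconds);
	}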
event snmp_set_request(c: connection, is_orig: bool, header: SNMP::Header, pdu: SNMP::PDU) &priority=5
{
local s = init_state(c, header);
s$set_requests += |pdu$bindings|;
}
event snmp_trap(c: connection, is_orig: bool, header: SNMP::Header, pdu: SNMP::TrapPDU) &priority=5
{
init_state(c, header);
}
event snmp_inform_request(c: connection, is_orig: bool, header: SNMP::Header, pdu: SNMP::PDU) &priority=5
{
init_state(c, header);
}
event snmp_trapV2(c: connection, is_orig: bool, header: SNMP::Header, pdu: SNMP::PDU) &priority=5
{
init_state(c, header);
}
event snmp_report(c: connection, is_orig: bool, header: SNMP::Header, pdu: SNMP::PDU) &priority=5
{
init_state(c, header);
}
event snmp_unknown_pdu(c: connection, is_orig: bool, header: SNMP::Header, tag: count) &priority=5
{
init_state(c, header);
}
event snmp_unknown_scoped_pdu(c: connection, is_orig: bool, header: SNMP::Header, tag: count) &priority=5
{
init_state(c, header);
}
event snmp_encrypted_pdu(c: connection, is_orig: bool, header: SNMP::Header) &priority=5
{
init_state(c, header);
}
#event snmp_unknown_header_version(c: connection, is_orig: bool, version: count) &priority=5
# {
# }

View file

@ -1,5 +1,6 @@
@load ./consts @load ./consts
@load ./main @load ./main
@load ./mozilla-ca-list @load ./mozilla-ca-list
@load ./files
@load-sigs ./dpd.sig @load-sigs ./dpd.sig

View file

@ -14,15 +14,43 @@ export {
[TLSv11] = "TLSv11", [TLSv11] = "TLSv11",
[TLSv12] = "TLSv12", [TLSv12] = "TLSv12",
} &default=function(i: count):string { return fmt("unknown-%d", i); }; } &default=function(i: count):string { return fmt("unknown-%d", i); };
## Mapping between numeric codes and human readable strings for alert ## TLS content types:
const CHANGE_CIPHER_SPEC = 20;
const ALERT = 21;
const HANDSHAKE = 22;
const APPLICATION_DATA = 23;
const HEARTBEAT = 24;
const V2_ERROR = 300;
const V2_CLIENT_HELLO = 301;
const V2_CLIENT_MASTER_KEY = 302;
const V2_SERVER_HELLO = 304;
## TLS Handshake types:
const HELLO_REQUEST = 0;
const CLIENT_HELLO = 1;
const SERVER_HELLO = 2;
const HELLO_VERIFY_REQUEST = 3; # RFC 6347
const SESSION_TICKET = 4; # RFC 5077
const CERTIFICATE = 11;
const SERVER_KEY_EXCHANGE = 12;
const CERTIFICATE_REQUEST = 13;
const SERVER_HELLO_DONE = 14;
const CERTIFICATE_VERIFY = 15;
const CLIENT_KEY_EXCHANGE = 16;
const FINISHED = 20;
const CERTIFICATE_URL = 21; # RFC 3546
const CERTIFICATE_STATUS = 22; # RFC 3546
const SUPPLEMENTAL_DATA = 23; # RFC 4680
## Mapping between numeric codes and human readable strings for alert
## levels. ## levels.
const alert_levels: table[count] of string = { const alert_levels: table[count] of string = {
[1] = "warning", [1] = "warning",
[2] = "fatal", [2] = "fatal",
} &default=function(i: count):string { return fmt("unknown-%d", i); }; } &default=function(i: count):string { return fmt("unknown-%d", i); };
## Mapping between numeric codes and human readable strings for alert ## Mapping between numeric codes and human readable strings for alert
## descriptions. ## descriptions.
const alert_descriptions: table[count] of string = { const alert_descriptions: table[count] of string = {
[0] = "close_notify", [0] = "close_notify",
@ -58,7 +86,7 @@ export {
[115] = "unknown_psk_identity", [115] = "unknown_psk_identity",
[120] = "no_application_protocol", [120] = "no_application_protocol",
} &default=function(i: count):string { return fmt("unknown-%d", i); }; } &default=function(i: count):string { return fmt("unknown-%d", i); };
## Mapping between numeric codes and human readable strings for SSL/TLS ## Mapping between numeric codes and human readable strings for SSL/TLS
## extensions. ## extensions.
# More information can be found here: # More information can be found here:
@ -83,6 +111,11 @@ export {
[16] = "application_layer_protocol_negotiation", [16] = "application_layer_protocol_negotiation",
[17] = "status_request_v2", [17] = "status_request_v2",
[18] = "signed_certificate_timestamp", [18] = "signed_certificate_timestamp",
[19] = "client_certificate_type",
[20] = "server_certificate_type",
[21] = "padding", # temporary till 2015-03-12
[22] = "encrypt_then_mac",
[23] = "extended_master_secret", # temporary till 2015-09-26
[35] = "SessionTicket TLS", [35] = "SessionTicket TLS",
[40] = "extended_random", [40] = "extended_random",
[13172] = "next_protocol_negotiation", [13172] = "next_protocol_negotiation",
@ -93,7 +126,56 @@ export {
[35655] = "padding", [35655] = "padding",
[65281] = "renegotiation_info" [65281] = "renegotiation_info"
} &default=function(i: count):string { return fmt("unknown-%d", i); }; } &default=function(i: count):string { return fmt("unknown-%d", i); };
## Mapping between numeric codes and human readable strings for SSL/TLS elliptic curves.
# See http://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#tls-parameters-8
const ec_curves: table[count] of string = {
[1] = "sect163k1",
[2] = "sect163r1",
[3] = "sect163r2",
[4] = "sect193r1",
[5] = "sect193r2",
[6] = "sect233k1",
[7] = "sect233r1",
[8] = "sect239k1",
[9] = "sect283k1",
[10] = "sect283r1",
[11] = "sect409k1",
[12] = "sect409r1",
[13] = "sect571k1",
[14] = "sect571r1",
[15] = "secp160k1",
[16] = "secp160r1",
[17] = "secp160r2",
[18] = "secp192k1",
[19] = "secp192r1",
[20] = "secp224k1",
[21] = "secp224r1",
[22] = "secp256k1",
[23] = "secp256r1",
[24] = "secp384r1",
[25] = "secp521r1",
[26] = "brainpoolP256r1",
[27] = "brainpoolP384r1",
[28] = "brainpoolP512r1",
# draft-ietf-tls-negotiated-ff-dhe-02
[256] = "ffdhe2432",
[257] = "ffdhe3072",
[258] = "ffdhe4096",
[259] = "ffdhe6144",
[260] = "ffdhe8192",
[0xFF01] = "arbitrary_explicit_prime_curves",
[0xFF02] = "arbitrary_explicit_char2_curves"
} &default=function(i: count):string { return fmt("unknown-%d", i); };
## Mapping between numeric codes and human readable strings for SSL/TLS EC point formats.
# See http://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#tls-parameters-9
const ec_point_formats: table[count] of string = {
[0] = "uncompressed",
[1] = "ansiX962_compressed_prime",
[2] = "ansiX962_compressed_char2"
} &default=function(i: count):string { return fmt("unknown-%d", i); };
# SSLv2 # SSLv2
const SSLv20_CK_RC4_128_WITH_MD5 = 0x010080; const SSLv20_CK_RC4_128_WITH_MD5 = 0x010080;
const SSLv20_CK_RC4_128_EXPORT40_WITH_MD5 = 0x020080; const SSLv20_CK_RC4_128_EXPORT40_WITH_MD5 = 0x020080;
@ -444,6 +526,10 @@ export {
const TLS_PSK_WITH_AES_256_CCM_8 = 0xC0A9; const TLS_PSK_WITH_AES_256_CCM_8 = 0xC0A9;
const TLS_PSK_DHE_WITH_AES_128_CCM_8 = 0xC0AA; const TLS_PSK_DHE_WITH_AES_128_CCM_8 = 0xC0AA;
const TLS_PSK_DHE_WITH_AES_256_CCM_8 = 0xC0AB; const TLS_PSK_DHE_WITH_AES_256_CCM_8 = 0xC0AB;
const TLS_ECDHE_ECDSA_WITH_AES_128_CCM = 0xC0AC;
const TLS_ECDHE_ECDSA_WITH_AES_256_CCM = 0xC0AD;
const TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8 = 0xC0AE;
const TLS_ECDHE_ECDSA_WITH_AES_256_CCM_8 = 0xC0AF;
# draft-agl-tls-chacha20poly1305-02 # draft-agl-tls-chacha20poly1305-02
const TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 = 0xCC13; const TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 = 0xCC13;
const TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 = 0xCC14; const TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 = 0xCC14;
@ -458,8 +544,8 @@ export {
const SSL_RSA_WITH_DES_CBC_MD5 = 0xFF82; const SSL_RSA_WITH_DES_CBC_MD5 = 0xFF82;
const SSL_RSA_WITH_3DES_EDE_CBC_MD5 = 0xFF83; const SSL_RSA_WITH_3DES_EDE_CBC_MD5 = 0xFF83;
const TLS_EMPTY_RENEGOTIATION_INFO_SCSV = 0x00FF; const TLS_EMPTY_RENEGOTIATION_INFO_SCSV = 0x00FF;
## This is a table of all known cipher specs. It can be used for ## This is a table of all known cipher specs. It can be used for
## detecting unknown ciphers and for converting the cipher spec ## detecting unknown ciphers and for converting the cipher spec
## constants into a human readable format. ## constants into a human readable format.
const cipher_desc: table[count] of string = { const cipher_desc: table[count] of string = {
@ -807,6 +893,10 @@ export {
[TLS_PSK_WITH_AES_256_CCM_8] = "TLS_PSK_WITH_AES_256_CCM_8", [TLS_PSK_WITH_AES_256_CCM_8] = "TLS_PSK_WITH_AES_256_CCM_8",
[TLS_PSK_DHE_WITH_AES_128_CCM_8] = "TLS_PSK_DHE_WITH_AES_128_CCM_8", [TLS_PSK_DHE_WITH_AES_128_CCM_8] = "TLS_PSK_DHE_WITH_AES_128_CCM_8",
[TLS_PSK_DHE_WITH_AES_256_CCM_8] = "TLS_PSK_DHE_WITH_AES_256_CCM_8", [TLS_PSK_DHE_WITH_AES_256_CCM_8] = "TLS_PSK_DHE_WITH_AES_256_CCM_8",
[TLS_ECDHE_ECDSA_WITH_AES_128_CCM] = "TLS_ECDHE_ECDSA_WITH_AES_128_CCM",
[TLS_ECDHE_ECDSA_WITH_AES_256_CCM] = "TLS_ECDHE_ECDSA_WITH_AES_256_CCM",
[TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8] = "TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8",
[TLS_ECDHE_ECDSA_WITH_AES_256_CCM_8] = "TLS_ECDHE_ECDSA_WITH_AES_256_CCM_8",
[TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256] = "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256", [TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256] = "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256",
[TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256] = "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256", [TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256] = "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256",
[TLS_DHE_RSA_WITH_CHACHA20_POLY1305_SHA256] = "TLS_DHE_RSA_WITH_CHACHA20_POLY1305_SHA256", [TLS_DHE_RSA_WITH_CHACHA20_POLY1305_SHA256] = "TLS_DHE_RSA_WITH_CHACHA20_POLY1305_SHA256",
@ -820,43 +910,5 @@ export {
[SSL_RSA_WITH_3DES_EDE_CBC_MD5] = "SSL_RSA_WITH_3DES_EDE_CBC_MD5", [SSL_RSA_WITH_3DES_EDE_CBC_MD5] = "SSL_RSA_WITH_3DES_EDE_CBC_MD5",
[TLS_EMPTY_RENEGOTIATION_INFO_SCSV] = "TLS_EMPTY_RENEGOTIATION_INFO_SCSV", [TLS_EMPTY_RENEGOTIATION_INFO_SCSV] = "TLS_EMPTY_RENEGOTIATION_INFO_SCSV",
} &default=function(i: count):string { return fmt("unknown-%d", i); }; } &default=function(i: count):string { return fmt("unknown-%d", i); };
## Mapping between the constants and string values for SSL/TLS errors.
const x509_errors: table[count] of string = {
[0] = "ok",
[1] = "unable to get issuer cert",
[2] = "unable to get crl",
[3] = "unable to decrypt cert signature",
[4] = "unable to decrypt crl signature",
[5] = "unable to decode issuer public key",
[6] = "cert signature failure",
[7] = "crl signature failure",
[8] = "cert not yet valid",
[9] = "cert has expired",
[10] = "crl not yet valid",
[11] = "crl has expired",
[12] = "error in cert not before field",
[13] = "error in cert not after field",
[14] = "error in crl last update field",
[15] = "error in crl next update field",
[16] = "out of mem",
[17] = "depth zero self signed cert",
[18] = "self signed cert in chain",
[19] = "unable to get issuer cert locally",
[20] = "unable to verify leaf signature",
[21] = "cert chain too long",
[22] = "cert revoked",
[23] = "invalid ca",
[24] = "path length exceeded",
[25] = "invalid purpose",
[26] = "cert untrusted",
[27] = "cert rejected",
[28] = "subject issuer mismatch",
[29] = "akid skid mismatch",
[30] = "akid issuer serial mismatch",
[31] = "keyusage no certsign",
[32] = "unable to get crl issuer",
[33] = "unhandled critical extension",
} &default=function(i: count):string { return fmt("unknown-%d", i); };
} }

View file

@ -1,7 +1,7 @@
signature dpd_ssl_server { signature dpd_ssl_server {
ip-proto == tcp ip-proto == tcp
# Server hello. # Server hello.
payload /^(\x16\x03[\x00\x01\x02]..\x02...\x03[\x00\x01\x02]|...?\x04..\x00\x02).*/ payload /^(\x16\x03[\x00\x01\x02\x03]..\x02...\x03[\x00\x01\x02\x03]|...?\x04..\x00\x02).*/
requires-reverse-signature dpd_ssl_client requires-reverse-signature dpd_ssl_client
enable "ssl" enable "ssl"
tcp-state responder tcp-state responder
@ -10,6 +10,6 @@ signature dpd_ssl_server {
signature dpd_ssl_client { signature dpd_ssl_client {
ip-proto == tcp ip-proto == tcp
# Client hello. # Client hello.
payload /^(\x16\x03[\x00\x01\x02]..\x01...\x03[\x00\x01\x02]|...?\x01[\x00\x01\x02][\x02\x03]).*/ payload /^(\x16\x03[\x00\x01\x02\x03]..\x01...\x03[\x00\x01\x02\x03]|...?\x01[\x00\x03][\x00\x01\x02\x03]).*/
tcp-state originator tcp-state originator
} }

View file

@ -0,0 +1,137 @@
@load ./main
@load base/utils/conn-ids
@load base/frameworks/files
@load base/files/x509
module SSL;
export {
redef record Info += {
## Chain of certificates offered by the server to validate its
## complete signing chain.
cert_chain: vector of Files::Info &optional;
## An ordered vector of all certificate file unique IDs for the
## certificates offered by the server.
cert_chain_fuids: vector of string &optional &log;
## Chain of certificates offered by the client to validate its
## complete signing chain.
client_cert_chain: vector of Files::Info &optional;
## An ordered vector of all certificate file unique IDs for the
## certificates offered by the client.
client_cert_chain_fuids: vector of string &optional &log;
## Subject of the X.509 certificate offered by the server.
subject: string &log &optional;
## Subject of the signer of the X.509 certificate offered by the
## server.
issuer: string &log &optional;
## Subject of the X.509 certificate offered by the client.
client_subject: string &log &optional;
## Subject of the signer of the X.509 certificate offered by the
## client.
client_issuer: string &log &optional;
## Current number of certificates seen from either side. Used
## to create file handles.
server_depth: count &default=0;
client_depth: count &default=0;
};
## Default file handle provider for SSL.
global get_file_handle: function(c: connection, is_orig: bool): string;
## Default file describer for SSL.
global describe_file: function(f: fa_file): string;
}
function get_file_handle(c: connection, is_orig: bool): string
{
# Unused. File handles are generated in the analyzer.
return "";
}
function describe_file(f: fa_file): string
{
if ( f$source != "SSL" || ! f?$info || ! f$info?$x509 || ! f$info$x509?$certificate )
return "";
# It is difficult to reliably describe a certificate - especially since
# we do not know when this function is called (hence, if the data structures
# are already populated).
#
# Just return a bit of our connection information and hope that that is good enough.
for ( cid in f$conns )
{
if ( f$conns[cid]?$ssl )
{
local c = f$conns[cid];
return cat(c$id$resp_h, ":", c$id$resp_p);
}
}
return cat("Serial: ", f$info$x509$certificate$serial, " Subject: ",
f$info$x509$certificate$subject, " Issuer: ",
f$info$x509$certificate$issuer);
}
event bro_init() &priority=5
{
Files::register_protocol(Analyzer::ANALYZER_SSL,
[$get_file_handle = SSL::get_file_handle,
$describe = SSL::describe_file]);
}
event file_over_new_connection(f: fa_file, c: connection, is_orig: bool) &priority=5
{
if ( ! c?$ssl )
return;
if ( ! c$ssl?$cert_chain )
{
c$ssl$cert_chain = vector();
c$ssl$client_cert_chain = vector();
c$ssl$cert_chain_fuids = string_vec();
c$ssl$client_cert_chain_fuids = string_vec();
}
if ( is_orig )
{
c$ssl$client_cert_chain[|c$ssl$client_cert_chain|] = f$info;
c$ssl$client_cert_chain_fuids[|c$ssl$client_cert_chain_fuids|] = f$id;
}
else
{
c$ssl$cert_chain[|c$ssl$cert_chain|] = f$info;
c$ssl$cert_chain_fuids[|c$ssl$cert_chain_fuids|] = f$id;
}
Files::add_analyzer(f, Files::ANALYZER_X509);
# always calculate hashes. They are not necessary for base scripts
# but very useful for identification, and required for policy scripts
Files::add_analyzer(f, Files::ANALYZER_MD5);
Files::add_analyzer(f, Files::ANALYZER_SHA1);
}
event ssl_established(c: connection) &priority=6
{
# update subject and issuer information
if ( c$ssl?$cert_chain && |c$ssl$cert_chain| > 0 &&
c$ssl$cert_chain[0]?$x509 )
{
c$ssl$subject = c$ssl$cert_chain[0]$x509$certificate$subject;
c$ssl$issuer = c$ssl$cert_chain[0]$x509$certificate$issuer;
}
if ( c$ssl?$client_cert_chain && |c$ssl$client_cert_chain| > 0 &&
c$ssl$client_cert_chain[0]?$x509 )
{
c$ssl$client_subject = c$ssl$client_cert_chain[0]$x509$certificate$subject;
c$ssl$client_issuer = c$ssl$client_cert_chain[0]$x509$certificate$issuer;
}
}

View file

@ -12,47 +12,38 @@ export {
## Time when the SSL connection was first detected. ## Time when the SSL connection was first detected.
ts: time &log; ts: time &log;
## Unique ID for the connection. ## Unique ID for the connection.
uid: string &log; uid: string &log;
## The connection's 4-tuple of endpoint addresses/ports. ## The connection's 4-tuple of endpoint addresses/ports.
id: conn_id &log; id: conn_id &log;
## SSL/TLS version that the server offered. ## SSL/TLS version that the server offered.
version: string &log &optional; version: string &log &optional;
## SSL/TLS cipher suite that the server chose. ## SSL/TLS cipher suite that the server chose.
cipher: string &log &optional; cipher: string &log &optional;
## Elliptic curve the server chose when using ECDH/ECDHE.
curve: string &log &optional;
## Value of the Server Name Indicator SSL/TLS extension. It ## Value of the Server Name Indicator SSL/TLS extension. It
## indicates the server name that the client was requesting. ## indicates the server name that the client was requesting.
server_name: string &log &optional; server_name: string &log &optional;
## Session ID offered by the client for session resumption. ## Session ID offered by the client for session resumption.
session_id: string &log &optional; ## Not used for logging.
## Subject of the X.509 certificate offered by the server. session_id: string &optional;
subject: string &log &optional; ## Flag to indicate if the session was resumed reusing
## Subject of the signer of the X.509 certificate offered by the ## the key material exchanged in an earlier connection.
## server. resumed: bool &log &default=F;
issuer_subject: string &log &optional; ## Flag to indicate if we saw a non-empty session ticket being
## NotValidBefore field value from the server certificate. ## sent by the client using an empty session ID. This value
not_valid_before: time &log &optional; ## is used to determine if a session is being resumed. It's
## NotValidAfter field value from the server certificate. ## not logged.
not_valid_after: time &log &optional; client_ticket_empty_session_seen: bool &default=F;
## Flag to indicate if we saw a client key exchange message sent
## by the client. This value is used to determine if a session
## is being resumed. It's not logged.
client_key_exchange_seen: bool &default=F;
## Last alert that was seen during the connection. ## Last alert that was seen during the connection.
last_alert: string &log &optional; last_alert: string &log &optional;
## Next protocol the server chose using the application layer
## Subject of the X.509 certificate offered by the client. ## next protocol extension, if present.
client_subject: string &log &optional; next_protocol: string &log &optional;
## Subject of the signer of the X.509 certificate offered by the
## client.
client_issuer_subject: string &log &optional;
## Full binary server certificate stored in DER format.
cert: string &optional;
## Chain of certificates offered by the server to validate its
## complete signing chain.
cert_chain: vector of string &optional;
## Full binary client certificate stored in DER format.
client_cert: string &optional;
## Chain of certificates offered by the client to validate its
## complete signing chain.
client_cert_chain: vector of string &optional;
## The analyzer ID used for the analyzer instance attached ## The analyzer ID used for the analyzer instance attached
## to each connection. It is not used for logging since it's a ## to each connection. It is not used for logging since it's a
@ -61,11 +52,11 @@ export {
## Flag to indicate if this ssl session has been established ## Flag to indicate if this ssl session has been established
## successfully, or if it was aborted during the handshake. ## successfully, or if it was aborted during the handshake.
established: bool &log &default=F; established: bool &log &default=F;
## Flag to indicate if this record already has been logged, to ## Flag to indicate if this record already has been logged, to
## prevent duplicates. ## prevent duplicates.
logged: bool &default=F; logged: bool &default=F;
}; };
## The default root CA bundle. By default, the mozilla-ca-list.bro ## The default root CA bundle. By default, the mozilla-ca-list.bro
@ -116,8 +107,7 @@ event bro_init() &priority=5
function set_session(c: connection) function set_session(c: connection)
{ {
if ( ! c?$ssl ) if ( ! c?$ssl )
c$ssl = [$ts=network_time(), $uid=c$uid, $id=c$id, $cert_chain=vector(), c$ssl = [$ts=network_time(), $uid=c$uid, $id=c$id];
$client_cert_chain=vector()];
} }
function delay_log(info: Info, token: string) function delay_log(info: Info, token: string)
@ -164,8 +154,10 @@ function finish(c: connection, remove_analyzer: bool)
{ {
log_record(c$ssl); log_record(c$ssl);
if ( remove_analyzer && disable_analyzer_after_detection && c?$ssl && c$ssl?$analyzer_id ) if ( remove_analyzer && disable_analyzer_after_detection && c?$ssl && c$ssl?$analyzer_id )
{
disable_analyzer(c$id, c$ssl$analyzer_id); disable_analyzer(c$id, c$ssl$analyzer_id);
delete c$ssl$analyzer_id; delete c$ssl$analyzer_id;
}
} }
event ssl_client_hello(c: connection, version: count, possible_ts: time, client_random: string, session_id: string, ciphers: index_vec) &priority=5 event ssl_client_hello(c: connection, version: count, possible_ts: time, client_random: string, session_id: string, ciphers: index_vec) &priority=5
@ -173,8 +165,11 @@ event ssl_client_hello(c: connection, version: count, possible_ts: time, client_
set_session(c); set_session(c);
# Save the session_id if there is one set. # Save the session_id if there is one set.
if ( session_id != /^\x00{32}$/ ) if ( |session_id| > 0 && session_id != /^\x00{32}$/ )
{
c$ssl$session_id = bytestring_to_hexstr(session_id); c$ssl$session_id = bytestring_to_hexstr(session_id);
c$ssl$client_ticket_empty_session_seen = F;
}
} }
event ssl_server_hello(c: connection, version: count, possible_ts: time, server_random: string, session_id: string, cipher: count, comp_method: count) &priority=5 event ssl_server_hello(c: connection, version: count, possible_ts: time, server_random: string, session_id: string, cipher: count, comp_method: count) &priority=5
@ -183,57 +178,67 @@ event ssl_server_hello(c: connection, version: count, possible_ts: time, server_
c$ssl$version = version_strings[version]; c$ssl$version = version_strings[version];
c$ssl$cipher = cipher_desc[cipher]; c$ssl$cipher = cipher_desc[cipher];
if ( c$ssl?$session_id && c$ssl$session_id == bytestring_to_hexstr(session_id) )
c$ssl$resumed = T;
} }
event x509_certificate(c: connection, is_orig: bool, cert: X509, chain_idx: count, chain_len: count, der_cert: string) &priority=5 event ssl_server_curve(c: connection, curve: count) &priority=5
{ {
set_session(c); set_session(c);
# We aren't doing anything with client certificates yet. c$ssl$curve = ec_curves[curve];
if ( is_orig ) }
{
if ( chain_idx == 0 )
{
# Save the primary cert.
c$ssl$client_cert = der_cert;
# Also save other certificate information about the primary cert. event ssl_extension_server_name(c: connection, is_orig: bool, names: string_vec) &priority=5
c$ssl$client_subject = cert$subject; {
c$ssl$client_issuer_subject = cert$issuer; set_session(c);
}
else
{
# Otherwise, add it to the cert validation chain.
c$ssl$client_cert_chain[|c$ssl$client_cert_chain|] = der_cert;
}
}
else
{
if ( chain_idx == 0 )
{
# Save the primary cert.
c$ssl$cert = der_cert;
# Also save other certificate information about the primary cert. if ( is_orig && |names| > 0 )
c$ssl$subject = cert$subject; {
c$ssl$issuer_subject = cert$issuer; c$ssl$server_name = names[0];
c$ssl$not_valid_before = cert$not_valid_before; if ( |names| > 1 )
c$ssl$not_valid_after = cert$not_valid_after; event conn_weird("SSL_many_server_names", c, cat(names));
}
else
{
# Otherwise, add it to the cert validation chain.
c$ssl$cert_chain[|c$ssl$cert_chain|] = der_cert;
}
} }
} }
event ssl_extension_application_layer_protocol_negotiation(c: connection, is_orig: bool, protocols: string_vec)
{
set_session(c);
if ( is_orig )
return;
if ( |protocols| > 0 )
c$ssl$next_protocol = protocols[0];
}
event ssl_handshake_message(c: connection, is_orig: bool, msg_type: count, length: count) &priority=5
{
set_session(c);
if ( is_orig && msg_type == SSL::CLIENT_KEY_EXCHANGE )
c$ssl$client_key_exchange_seen = T;
}
# Extension event is fired _before_ the respective client or server hello.
# Important for client_ticket_empty_session_seen.
event ssl_extension(c: connection, is_orig: bool, code: count, val: string) &priority=5 event ssl_extension(c: connection, is_orig: bool, code: count, val: string) &priority=5
{ {
set_session(c); set_session(c);
if ( is_orig && extensions[code] == "server_name" ) if ( is_orig && SSL::extensions[code] == "SessionTicket TLS" && |val| > 0 )
c$ssl$server_name = sub_bytes(val, 6, |val|); # In this case, we might have an empty ID. Set back to F in client_hello event
# if it is not empty after all.
c$ssl$client_ticket_empty_session_seen = T;
}
event ssl_change_cipher_spec(c: connection, is_orig: bool) &priority=5
{
set_session(c);
if ( is_orig && c$ssl$client_ticket_empty_session_seen && ! c$ssl$client_key_exchange_seen )
c$ssl$resumed = T;
} }
event ssl_alert(c: connection, is_orig: bool, level: count, desc: count) &priority=5 event ssl_alert(c: connection, is_orig: bool, level: count, desc: count) &priority=5
@ -243,7 +248,7 @@ event ssl_alert(c: connection, is_orig: bool, level: count, desc: count) &priori
c$ssl$last_alert = alert_descriptions[desc]; c$ssl$last_alert = alert_descriptions[desc];
} }
event ssl_established(c: connection) &priority=5 event ssl_established(c: connection) &priority=7
{ {
set_session(c); set_session(c);
c$ssl$established = T; c$ssl$established = T;
@ -263,7 +268,7 @@ event connection_state_remove(c: connection) &priority=-5
event protocol_confirmation(c: connection, atype: Analyzer::Tag, aid: count) &priority=5 event protocol_confirmation(c: connection, atype: Analyzer::Tag, aid: count) &priority=5
{ {
if ( atype == Analyzer::ANALYZER_SSL ) if ( atype == Analyzer::ANALYZER_SSL )
{ {
set_session(c); set_session(c);
c$ssl$analyzer_id = aid; c$ssl$analyzer_id = aid;

View file

@@ -65,12 +65,14 @@ function request2curl(r: Request, bodyfile: string, headersfile: string): string
 	cmd = fmt("%s -m %.0f", cmd, r$max_time);

 	if ( r?$client_data )
-		cmd = fmt("%s -d -", cmd);
+		cmd = fmt("%s -d @-", cmd);

 	if ( r?$addl_curl_args )
 		cmd = fmt("%s %s", cmd, r$addl_curl_args);

 	cmd = fmt("%s \"%s\"", cmd, str_shell_escape(r$url));
+	# Make sure file will exist even if curl did not write one.
+	cmd = fmt("%s && touch %s", cmd, str_shell_escape(bodyfile));
 	return cmd;
 	}
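
The change above hands client_data to curl on stdin ("-d @-") and touches the body file so it exists even when curl writes nothing. A rough usage sketch of the surrounding Active HTTP API, with a made-up URL and payload:

@load base/utils/active-http

event bro_init()
	{
	# client_data is passed to curl on stdin by request2curl().
	when ( local resp = ActiveHTTP::request([$url="http://example.com/submit",
	                                         $method="POST",
	                                         $client_data="key=value"]) )
		{
		print fmt("got HTTP status %d (%s)", resp$code, resp$msg);
		}
	}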

View file

@@ -1,4 +1,4 @@
-##! Functions for parsing and manipulating IP addresses.
+##! Functions for parsing and manipulating IP and MAC addresses.

 # Regular expressions for matching IP addresses in strings.
 const ipv4_addr_regex = /[[:digit:]]{1,3}\.[[:digit:]]{1,3}\.[[:digit:]]{1,3}\.[[:digit:]]{1,3}/;

@@ -14,13 +14,13 @@ const ipv6_compressed_hex4dec_regex = /(([0-9A-Fa-f]{1,4}(:[0-9A-Fa-f]{1,4})*)?)
 #	ipv6_compressed_hex4dec_regex;
 #const ip_addr_regex = ipv4_addr_regex | ipv6_addr_regex;

 const ipv6_addr_regex =
 	/([0-9A-Fa-f]{1,4}:){7}[0-9A-Fa-f]{1,4}/ |
 	/(([0-9A-Fa-f]{1,4}(:[0-9A-Fa-f]{1,4})*)?)::(([0-9A-Fa-f]{1,4}(:[0-9A-Fa-f]{1,4})*)?)/ | # IPv6 Compressed Hex
 	/(([0-9A-Fa-f]{1,4}:){6,6})([0-9]+)\.([0-9]+)\.([0-9]+)\.([0-9]+)/ | # 6Hex4Dec
 	/(([0-9A-Fa-f]{1,4}(:[0-9A-Fa-f]{1,4})*)?)::(([0-9A-Fa-f]{1,4}:)*)([0-9]+)\.([0-9]+)\.([0-9]+)\.([0-9]+)/; # CompressedHex4Dec

 const ip_addr_regex =
 	/[[:digit:]]{1,3}\.[[:digit:]]{1,3}\.[[:digit:]]{1,3}\.[[:digit:]]{1,3}/ |
 	/([0-9A-Fa-f]{1,4}:){7}[0-9A-Fa-f]{1,4}/ |
 	/(([0-9A-Fa-f]{1,4}(:[0-9A-Fa-f]{1,4})*)?)::(([0-9A-Fa-f]{1,4}(:[0-9A-Fa-f]{1,4})*)?)/ | # IPv6 Compressed Hex

@@ -57,7 +57,7 @@ function is_valid_ip(ip_str: string): bool
 		octets = split(ip_str, /\./);
 		if ( |octets| != 4 )
 			return F;

 		return has_valid_octets(octets);
 		}
 	else if ( ip_str == ipv6_addr_regex )

@@ -119,3 +119,30 @@ function addr_to_uri(a: addr): string
 	else
 		return fmt("[%s]", a);
 	}
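
A quick sanity-check sketch for the helpers in the hunks above; the expected results in the comments follow from the code as shown.

@load base/utils/addrs

event bro_init()
	{
	print is_valid_ip("192.168.1.300");            # F - last octet out of range
	print is_valid_ip("2607:f8b0:4009:802::1006"); # T
	print addr_to_uri([2607:f8b0:4009:802::1006]); # "[2607:f8b0:4009:802::1006]"
	}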
## Given a string, extracts the hex digits and returns a MAC address in
## the format: 00:a0:32:d7:81:8f. If the string doesn't contain 12 or 16 hex
## digits, an empty string is returned.
##
## a: the string to normalize.
##
## Returns: a normalized MAC address, or an empty string in the case of an error.
function normalize_mac(a: string): string
{
local result = to_lower(gsub(a, /[^A-Fa-f0-9]/, ""));
local octets: string_vec;
if ( |result| == 12 )
{
octets = str_split(result, vector(2, 4, 6, 8, 10));
return fmt("%s:%s:%s:%s:%s:%s", octets[1], octets[2], octets[3], octets[4], octets[5], octets[6]);
}
if ( |result| == 16 )
{
octets = str_split(result, vector(2, 4, 6, 8, 10, 12, 14));
return fmt("%s:%s:%s:%s:%s:%s:%s:%s", octets[1], octets[2], octets[3], octets[4], octets[5], octets[6], octets[7], octets[8]);
}
return "";
}
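
Hypothetical inputs for the new helper, with the values it should return given the logic above (12 hex digits yield a 6-octet address, 16 yield an 8-octet one, anything else an empty string):

event bro_init()
	{
	print normalize_mac("00-A0-32-D7-81-8F");  # "00:a0:32:d7:81:8f"
	print normalize_mac("00a0.32d7.818f");     # "00:a0:32:d7:81:8f"
	print normalize_mac("not a mac");          # "" (only 3 hex digits survive)
	}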

View file

@@ -96,8 +96,9 @@ event Exec::file_line(description: Input::EventDescription, tpe: Input::Event, s
 	result$files[track_file][|result$files[track_file]|] = s;
 	}

-event Input::end_of_data(name: string, source:string)
+event Input::end_of_data(orig_name: string, source:string)
 	{
+	local name = orig_name;
 	local parts = split1(name, /_/);
 	name = parts[1];

@@ -106,6 +107,15 @@ event Input::end_of_data(name: string, source:string)
 	local track_file = parts[2];

+	# If the file is empty, still add it to the result$files table. This is needed
+	# because it is expected that the file was read even if it was empty.
+	local result = results[name];
+	if ( ! result?$files )
+		result$files = table();
+
+	if ( track_file !in result$files )
+		result$files[track_file] = vector();
+
 	Input::remove(name);

 	if ( name !in pending_files )
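
With the addition above, a file registered via read_files shows up in result$files even when the command leaves it empty. A hedged sketch of the public API this affects (the command and path are made up):

@load base/utils/exec

event bro_init()
	{
	when ( local res = Exec::run([$cmd="touch /tmp/bro_empty_output",
	                              $read_files=set("/tmp/bro_empty_output")]) )
		{
		if ( res?$files && "/tmp/bro_empty_output" in res$files )
			print fmt("tracked file with %d lines", |res$files["/tmp/bro_empty_output"]|);
		}
	}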

View file

@@ -3,6 +3,28 @@
 ## A regular expression for matching and extracting URLs.
 const url_regex = /^([a-zA-Z\-]{3,5})(:\/\/[^\/?#"'\r\n><]*)([^?#"'\r\n><]*)([^[:blank:]\r\n"'><]*|\??[^"'\r\n><]*)/ &redef;
## A URI, as parsed by :bro:id:`decompose_uri`.
type URI: record {
## The URL's scheme.
scheme: string &optional;
## The location, which could be a domain name or an IP address. Left empty if not
## specified.
netlocation: string;
## Port number, if included in URI.
portnum: count &optional;
## Full path, including the file name. Will be '/' if no path is given.
path: string;
## Full file name, including extension, if there is a file name.
file_name: string &optional;
## The base filename, without extension, if there is a file name.
file_base: string &optional;
## The filename's extension, if there is a file name.
file_ext: string &optional;
## A table of all query parameters, mapping their keys to values, if there's a
## query.
params: table[string] of string &optional;
};
 ## Extracts URLs discovered in arbitrary text.
 function find_all_urls(s: string): string_set
 	{

@@ -23,3 +45,84 @@ function find_all_urls_without_scheme(s: string): string_set
 	return return_urls;
 	}
function decompose_uri(s: string): URI
{
local parts: string_array;
local u: URI = [$netlocation="", $path="/"];
if ( /\?/ in s)
{
# Parse query.
u$params = table();
parts = split1(s, /\?/);
s = parts[1];
local query: string = parts[2];
if ( /&/ in query )
{
local opv: table[count] of string = split(query, /&/);
for ( each in opv )
{
if ( /=/ in opv[each] )
{
parts = split1(opv[each], /=/);
u$params[parts[1]] = parts[2];
}
}
}
else
{
parts = split1(query, /=/);
u$params[parts[1]] = parts[2];
}
}
if ( /:\/\// in s )
{
# Parse scheme and remove from s.
parts = split1(s, /:\/\//);
u$scheme = parts[1];
s = parts[2];
}
if ( /\// in s )
{
# Parse path and remove from s.
parts = split1(s, /\//);
s = parts[1];
u$path = fmt("/%s", parts[2]);
if ( |u$path| > 1 && u$path[|u$path| - 1] != "/" )
{
local last_token: string = find_last(u$path, /\/.+/);
local full_filename = split1(last_token, /\//)[2];
if ( /\./ in full_filename )
{
u$file_name = full_filename;
u$file_base = split1(full_filename, /\./)[1];
u$file_ext = split1(full_filename, /\./)[2];
}
else
{
u$file_name = full_filename;
u$file_base = full_filename;
}
}
}
if ( /:/ in s )
{
# Parse location and port.
parts = split1(s, /:/);
u$netlocation = parts[1];
u$portnum = to_count(parts[2]);
}
else
u$netlocation = s;
return u;
}
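
A worked example of decompose_uri() on a made-up URL; the comments show the values the code above yields for it.

@load base/utils/urls

event bro_init()
	{
	local u = decompose_uri("http://www.example.com:8080/a/b/page.html?x=1&y=2");
	print u$scheme;       # "http"
	print u$netlocation;  # "www.example.com"
	print u$portnum;      # 8080
	print u$path;         # "/a/b/page.html"
	print u$file_name;    # "page.html"
	print u$file_base;    # "page"
	print u$file_ext;     # "html"
	print u$params["y"];  # "2"
	}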

View file

@@ -66,6 +66,6 @@ function do_mhr_lookup(hash: string, fi: Notice::FileInfo)
 event file_hash(f: fa_file, kind: string, hash: string)
 	{
-	if ( kind=="sha1" && match_file_types in f$mime_type )
+	if ( kind == "sha1" && f?$mime_type && match_file_types in f$mime_type )
 		do_mhr_lookup(hash, Notice::create_file_info(f));
 	}
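
For reference, a hedged sketch of tuning this detection locally, assuming the module name and load path of the shipped policy script; the added MIME patterns are only examples. A plain redef assignment is used here to avoid relying on pattern '+=' support.

@load frameworks/files/detect-MHR

redef TeamCymruMalwareHashRegistry::match_file_types =
	/application\/x-dosexec/ |
	/application\/pdf/ |
	/application\/java-archive/;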

Some files were not shown because too many files have changed in this diff.