diff --git a/CHANGES b/CHANGES index 6f0ed943ca..2cda402384 100644 --- a/CHANGES +++ b/CHANGES @@ -1,4 +1,147 @@ +2.0-372 | 2012-05-17 13:59:45 -0700 + + * Fix compile errors. (Jon Siwek) + + * Linking in the DS docs. (Robin Sommer) + + * Fix mobility checksums unit test. (Jon Siwek) + +2.0-367 | 2012-05-17 12:42:30 -0700 + + * Adding support for binary output via DataSeries. See + logging-dataseries.rst for more information. (Gilbert Clark and + Robin Sommer) + + * Adding target update-doc-sources to top-level Makefile that runs + genDocSourcesList.sh. (Robin Sommer) + + * Moving trace for rotation test into traces directory. (Robin Sommer) + + * Fixing a rotation race condition at termination. (Robin Sommer) + + * Extending log post-processor call to include the name of the + writer. (Robin Sommer) + + * In threads, an internal error now immediately aborts. Otherwise, + the error won't make it back to the main thread for a while and + subsequent code in the thread would still execute. (Robin Sommer) + + * DataSeries cleanup. (Robin Sommer) + + * Fixing threads' DoFinish() method. It wasn't called reliably. Now, + it's always called before the thread is destroyed (assuming + processing has went normally so far). (Robin Sommer) + +2.0-341 | 2012-05-17 09:54:30 -0700 + + * Add a comment to explain the ICMPv6 error message types. (Daniel Thayer) + + * Quieting external test output somehwat. (Robin Sommer) + +2.0-336 | 2012-05-14 17:15:44 -0700 + + * Don't print the various "weird" events to stderr. Address #805. + (Daniel Thayer) + + * Generate icmp_error_message event for ICMPv6 error msgs. + Previously, icmp_sent was being generated, but icmp_error_message + contains more info. + + * Improved documentation comments for icmp-related events. (Daniel + Thayer) + +2.0-330 | 2012-05-14 17:05:56 -0700 + + * Add `addr_to_uri` script-level function that adds brackets to an + address if it's IPv6 and will be included in a URI or when a + ":" needs to be appended to it. (Jon Siwek) + + * Also add a test case for content extraction. (Jon Siwek) + + * Fix typos and improve INSTALL document. (Daniel Thayer) + + * Switching to new btest command TEST-SERIALIZE for communication + tests. (Robin Sommer) + +2.0-323 | 2012-05-04 21:04:34 -0700 + + * Add SHA1 and SHA256 hashing BIFs. Addresses #542. + + * Refactor all internal MD5 stuff to use OpenSSL's. (Jon Siwek) + + * Changes to open-file caching limits and uncached file unserialization. (Jon Siwek) + + - Unserializing files that were previously kicked out of the open-file + cache would cause them to be fopen'd with the original access + permissions which is usually 'w' and causes truncation. They + are now opened in 'a' mode. (addresses #780) + + - Add 'max_files_in_cache' script option to manually set the maximum + amount of opened files to keep cached. Mainly this just helped + to create a simple test case for the above change. + + - Remove unused NO_HAVE_SETRLIMIT preprocessor switch. + + - On systems that don't enforce a limit on number of files opened for + the process, raise default max size of open-file cache from + 32 to 512. + +2.0-319 | 2012-05-03 13:24:44 -0700 + + * SSL bugfixes and cleanup. (Seth Hall) + + - SSL related files and classes renamed to remove the "binpac" term. + + - A small fix for DPD scripts to make the DPD log more helpful if + there are multiple continued failures. + + - Fixed the SSL analyzer to make it stop doing repeated violation + messages for some handshake failures. + + - Added a $issuer_subject to the SSL log. 
+ + - Created a basic test for SSL. + + - Fixed parsing of TLS server extensions. (Seth Hall) + +2.0-315 | 2012-05-03 11:44:17 -0700 + + * Add two more TLS extension values that we see in live traffic. + (Bernhard Amann) + + * Fixed IPv6 link local unicast CIDR and added IPv6 loopback to + private address space. (Seth Hall) + + * Fixed a problem where cluster workers were still processing + notices in some cases. (Seth Hall) + + * Added a configure option to specify the 'etc' directory. Addresses + #801. (Daniel Thayer) + + +2.0-306 | 2012-04-24 14:37:00 -0700 + + * Add further TLS extension values "extended_random" and + "heartbeat". (Seth Hall) + + * Fix problem with extracting FTP passwords and add "ftpuser" as + another anonymous username. (Seth Hall, discovered by Patrik + Lundin). + +2.0-303 | 2012-04-19 10:01:06 -0700 + + * Changes related to ICMPv6 Neighbor Discovery messages. (Jon Siwek) + + - The 'icmp_conn' record now contains an 'hlim' field since hop limit + in the IP header is an interesting field for at least these ND + messages. + + - Fixed and extended 'icmp_router_advertisement' event parameters. + + - Changed 'icmp_neighbor_advertisement' event parameters to add + more of the known boolean flags. + 2.0-301 | 2012-04-17 17:58:55 -0700 * Bro now support ICMPv6. (Matti Mantere, Jon Siwek, Robin Sommer, diff --git a/CMakeLists.txt b/CMakeLists.txt index d27fa2d40b..28b702ab01 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -107,6 +107,21 @@ if (GOOGLEPERFTOOLS_FOUND) endif () endif () +set(USE_DATASERIES false) +find_package(Lintel) +find_package(DataSeries) +find_package(LibXML2) + +if (LINTEL_FOUND AND DATASERIES_FOUND AND LIBXML2_FOUND) + set(USE_DATASERIES true) + include_directories(BEFORE ${Lintel_INCLUDE_DIR}) + include_directories(BEFORE ${DataSeries_INCLUDE_DIR}) + include_directories(BEFORE ${LibXML2_INCLUDE_DIR}) + list(APPEND OPTLIBS ${Lintel_LIBRARIES}) + list(APPEND OPTLIBS ${DataSeries_LIBRARIES}) + list(APPEND OPTLIBS ${LibXML2_LIBRARIES}) +endif() + if (ENABLE_PERFTOOLS_DEBUG) # Just a no op to prevent CMake from complaining about manually-specified # ENABLE_PERFTOOLS_DEBUG not being used if google perftools weren't found @@ -198,6 +213,7 @@ message( "\nGeoIP: ${USE_GEOIP}" "\nGoogle perftools: ${USE_PERFTOOLS}" "\n debugging: ${USE_PERFTOOLS_DEBUG}" + "\nDataSeries: ${USE_DATASERIES}" "\n" "\n================================================================\n" ) diff --git a/DocSourcesList.cmake b/DocSourcesList.cmake new file mode 100644 index 0000000000..1743b0258f --- /dev/null +++ b/DocSourcesList.cmake @@ -0,0 +1,144 @@ +# DO NOT EDIT +# This file is auto-generated from the genDocSourcesList.sh script. +# +# This is a list of Bro script sources for which to generate reST documentation. +# It will be included inline in the CMakeLists.txt found in the same directory +# in order to create Makefile targets that define how to generate reST from +# a given Bro script. +# +# Note: any path prefix of the script (2nd argument of rest_target macro) +# will be used to derive what path under scripts/ the generated documentation +# will be placed. 
+ +set(psd ${PROJECT_SOURCE_DIR}/scripts) + +rest_target(${CMAKE_CURRENT_SOURCE_DIR} example.bro internal) +rest_target(${psd} base/init-default.bro internal) +rest_target(${psd} base/init-bare.bro internal) + +rest_target(${CMAKE_BINARY_DIR}/src base/bro.bif.bro) +rest_target(${CMAKE_BINARY_DIR}/src base/const.bif.bro) +rest_target(${CMAKE_BINARY_DIR}/src base/event.bif.bro) +rest_target(${CMAKE_BINARY_DIR}/src base/logging.bif.bro) +rest_target(${CMAKE_BINARY_DIR}/src base/reporter.bif.bro) +rest_target(${CMAKE_BINARY_DIR}/src base/strings.bif.bro) +rest_target(${CMAKE_BINARY_DIR}/src base/types.bif.bro) +rest_target(${psd} base/frameworks/cluster/main.bro) +rest_target(${psd} base/frameworks/cluster/nodes/manager.bro) +rest_target(${psd} base/frameworks/cluster/nodes/proxy.bro) +rest_target(${psd} base/frameworks/cluster/nodes/worker.bro) +rest_target(${psd} base/frameworks/cluster/setup-connections.bro) +rest_target(${psd} base/frameworks/communication/main.bro) +rest_target(${psd} base/frameworks/control/main.bro) +rest_target(${psd} base/frameworks/dpd/main.bro) +rest_target(${psd} base/frameworks/intel/main.bro) +rest_target(${psd} base/frameworks/logging/main.bro) +rest_target(${psd} base/frameworks/logging/postprocessors/scp.bro) +rest_target(${psd} base/frameworks/logging/postprocessors/sftp.bro) +rest_target(${psd} base/frameworks/logging/writers/ascii.bro) +rest_target(${psd} base/frameworks/logging/writers/dataseries.bro) +rest_target(${psd} base/frameworks/metrics/cluster.bro) +rest_target(${psd} base/frameworks/metrics/main.bro) +rest_target(${psd} base/frameworks/metrics/non-cluster.bro) +rest_target(${psd} base/frameworks/notice/actions/add-geodata.bro) +rest_target(${psd} base/frameworks/notice/actions/drop.bro) +rest_target(${psd} base/frameworks/notice/actions/email_admin.bro) +rest_target(${psd} base/frameworks/notice/actions/page.bro) +rest_target(${psd} base/frameworks/notice/actions/pp-alarms.bro) +rest_target(${psd} base/frameworks/notice/cluster.bro) +rest_target(${psd} base/frameworks/notice/extend-email/hostnames.bro) +rest_target(${psd} base/frameworks/notice/main.bro) +rest_target(${psd} base/frameworks/notice/weird.bro) +rest_target(${psd} base/frameworks/packet-filter/main.bro) +rest_target(${psd} base/frameworks/packet-filter/netstats.bro) +rest_target(${psd} base/frameworks/reporter/main.bro) +rest_target(${psd} base/frameworks/signatures/main.bro) +rest_target(${psd} base/frameworks/software/main.bro) +rest_target(${psd} base/protocols/conn/contents.bro) +rest_target(${psd} base/protocols/conn/inactivity.bro) +rest_target(${psd} base/protocols/conn/main.bro) +rest_target(${psd} base/protocols/dns/consts.bro) +rest_target(${psd} base/protocols/dns/main.bro) +rest_target(${psd} base/protocols/ftp/file-extract.bro) +rest_target(${psd} base/protocols/ftp/main.bro) +rest_target(${psd} base/protocols/ftp/utils-commands.bro) +rest_target(${psd} base/protocols/http/file-extract.bro) +rest_target(${psd} base/protocols/http/file-hash.bro) +rest_target(${psd} base/protocols/http/file-ident.bro) +rest_target(${psd} base/protocols/http/main.bro) +rest_target(${psd} base/protocols/http/utils.bro) +rest_target(${psd} base/protocols/irc/dcc-send.bro) +rest_target(${psd} base/protocols/irc/main.bro) +rest_target(${psd} base/protocols/smtp/entities-excerpt.bro) +rest_target(${psd} base/protocols/smtp/entities.bro) +rest_target(${psd} base/protocols/smtp/main.bro) +rest_target(${psd} base/protocols/ssh/main.bro) +rest_target(${psd} base/protocols/ssl/consts.bro) 
+rest_target(${psd} base/protocols/ssl/main.bro) +rest_target(${psd} base/protocols/ssl/mozilla-ca-list.bro) +rest_target(${psd} base/protocols/syslog/consts.bro) +rest_target(${psd} base/protocols/syslog/main.bro) +rest_target(${psd} base/utils/addrs.bro) +rest_target(${psd} base/utils/conn-ids.bro) +rest_target(${psd} base/utils/directions-and-hosts.bro) +rest_target(${psd} base/utils/files.bro) +rest_target(${psd} base/utils/numbers.bro) +rest_target(${psd} base/utils/paths.bro) +rest_target(${psd} base/utils/patterns.bro) +rest_target(${psd} base/utils/site.bro) +rest_target(${psd} base/utils/strings.bro) +rest_target(${psd} base/utils/thresholds.bro) +rest_target(${psd} policy/frameworks/communication/listen.bro) +rest_target(${psd} policy/frameworks/control/controllee.bro) +rest_target(${psd} policy/frameworks/control/controller.bro) +rest_target(${psd} policy/frameworks/dpd/detect-protocols.bro) +rest_target(${psd} policy/frameworks/dpd/packet-segment-logging.bro) +rest_target(${psd} policy/frameworks/metrics/conn-example.bro) +rest_target(${psd} policy/frameworks/metrics/http-example.bro) +rest_target(${psd} policy/frameworks/metrics/ssl-example.bro) +rest_target(${psd} policy/frameworks/software/version-changes.bro) +rest_target(${psd} policy/frameworks/software/vulnerable.bro) +rest_target(${psd} policy/integration/barnyard2/main.bro) +rest_target(${psd} policy/integration/barnyard2/types.bro) +rest_target(${psd} policy/misc/analysis-groups.bro) +rest_target(${psd} policy/misc/capture-loss.bro) +rest_target(${psd} policy/misc/loaded-scripts.bro) +rest_target(${psd} policy/misc/profiling.bro) +rest_target(${psd} policy/misc/stats.bro) +rest_target(${psd} policy/misc/trim-trace-file.bro) +rest_target(${psd} policy/protocols/conn/known-hosts.bro) +rest_target(${psd} policy/protocols/conn/known-services.bro) +rest_target(${psd} policy/protocols/conn/weirds.bro) +rest_target(${psd} policy/protocols/dns/auth-addl.bro) +rest_target(${psd} policy/protocols/dns/detect-external-names.bro) +rest_target(${psd} policy/protocols/ftp/detect.bro) +rest_target(${psd} policy/protocols/ftp/software.bro) +rest_target(${psd} policy/protocols/http/detect-MHR.bro) +rest_target(${psd} policy/protocols/http/detect-intel.bro) +rest_target(${psd} policy/protocols/http/detect-sqli.bro) +rest_target(${psd} policy/protocols/http/detect-webapps.bro) +rest_target(${psd} policy/protocols/http/header-names.bro) +rest_target(${psd} policy/protocols/http/software-browser-plugins.bro) +rest_target(${psd} policy/protocols/http/software.bro) +rest_target(${psd} policy/protocols/http/var-extraction-cookies.bro) +rest_target(${psd} policy/protocols/http/var-extraction-uri.bro) +rest_target(${psd} policy/protocols/smtp/blocklists.bro) +rest_target(${psd} policy/protocols/smtp/detect-suspicious-orig.bro) +rest_target(${psd} policy/protocols/smtp/software.bro) +rest_target(${psd} policy/protocols/ssh/detect-bruteforcing.bro) +rest_target(${psd} policy/protocols/ssh/geo-data.bro) +rest_target(${psd} policy/protocols/ssh/interesting-hostnames.bro) +rest_target(${psd} policy/protocols/ssh/software.bro) +rest_target(${psd} policy/protocols/ssl/cert-hash.bro) +rest_target(${psd} policy/protocols/ssl/expiring-certs.bro) +rest_target(${psd} policy/protocols/ssl/extract-certs-pem.bro) +rest_target(${psd} policy/protocols/ssl/known-certs.bro) +rest_target(${psd} policy/protocols/ssl/validate-certs.bro) +rest_target(${psd} policy/tuning/defaults/packet-fragments.bro) +rest_target(${psd} policy/tuning/defaults/warnings.bro) 
+rest_target(${psd} policy/tuning/track-all-assets.bro) +rest_target(${psd} site/local-manager.bro) +rest_target(${psd} site/local-proxy.bro) +rest_target(${psd} site/local-worker.bro) +rest_target(${psd} site/local.bro) +rest_target(${psd} test-all-policy.bro) diff --git a/INSTALL b/INSTALL index d4aa93d11f..d9f7963ec4 100644 --- a/INSTALL +++ b/INSTALL @@ -5,34 +5,44 @@ Installing Bro Prerequisites ============= -Bro relies on the following libraries and tools, which need to be installed +Bro requires the following libraries and tools to be installed before you begin: * CMake 2.6.3 or greater http://www.cmake.org - * Libpcap (headers and libraries) http://www.tcpdump.org + * Perl (used only during the Bro build process) - * OpenSSL (headers and libraries) http://www.openssl.org + * Libpcap headers and libraries http://www.tcpdump.org - * SWIG http://www.swig.org + * OpenSSL headers and libraries http://www.openssl.org + + * BIND8 headers and libraries * Libmagic * Libz -Bro can make uses of some optional libraries if they are found at -installation time: + * SWIG http://www.swig.org - * LibGeoIP For geo-locating IP addresses. - -Bro also needs the following tools, but on most systems they will -already come preinstalled: - - * Bash (For Bro Control). - * BIND8 (headers and libraries) * Bison (GNU Parser Generator) + * Flex (Fast Lexical Analyzer) - * Perl (Used only during the Bro build process) + + * Bash (for BroControl) + + +Bro can make use of some optional libraries and tools if they are found at +build time: + + * LibGeoIP (for geo-locating IP addresses) + + * gperftools (tcmalloc is used to improve memory and CPU usage) + + * sendmail (for BroControl) + + * ipsumdump (for trace-summary) http://www.cs.ucla.edu/~kohler/ipsumdump + + * Ruby executable, library, and headers (for Broccoli Ruby bindings) Installation @@ -44,7 +54,7 @@ To build and install into ``/usr/local/bro``:: make make install -This will first build Bro into a directory inside the distribution +This will first build Bro in a directory inside the distribution called ``build/``, using default build options. It then installs all required files into ``/usr/local/bro``, including the Bro binary in ``/usr/local/bro/bin/bro``. @@ -60,22 +70,22 @@ choices unless you are creating such a package. Run ``./configure --help`` for more options. Depending on the Bro package you downloaded, there may be auxiliary -tools and libraries available in the ``aux/`` directory. All of them -except for ``aux/bro-aux`` will also be built and installed by doing -``make install``. To install the programs that come in the -``aux/bro-aux`` directory, use ``make install-aux``. There are +tools and libraries available in the ``aux/`` directory. Some of them +will be automatically built and installed along with Bro. There are ``--disable-*`` options that can be given to the configure script to -turn off unwanted auxiliary projects. +turn off unwanted auxiliary projects that would otherwise be installed +automatically. Finally, use ``make install-aux`` to install some of +the other programs that are in the ``aux/bro-aux`` directory. -OpenBSD users, please see our `FAQ -` if you are having +OpenBSD users, please see our FAQ at +http://www.bro-ids.org/documentation/faq.html if you are having problems installing Bro. Running Bro =========== Bro is a complex program and it takes a bit of time to get familiar -with it. A good place for newcomers to start is the Quickstart Guide +with it. 
A good place for newcomers to start is the Quick Start Guide at http://www.bro-ids.org/documentation/quickstart.html. For developers that wish to run Bro directly from the ``build/`` diff --git a/Makefile b/Makefile index 455fa6ed88..8633c736a4 100644 --- a/Makefile +++ b/Makefile @@ -41,6 +41,9 @@ broxygen: configured broxygenclean: configured $(MAKE) -C $(BUILD) $@ +update-doc-sources: + ./doc/scripts/genDocSourcesList.sh ./doc/scripts/DocSourcesList.cmake + dist: @rm -rf $(VERSION_FULL) $(VERSION_FULL).tgz @rm -rf $(VERSION_MIN) $(VERSION_MIN).tgz diff --git a/NEWS b/NEWS index c48b243552..fe2d9b452e 100644 --- a/NEWS +++ b/NEWS @@ -47,6 +47,13 @@ Bro 2.1 joint set of events. The `icmp_conn` record got a new boolean field 'v6' that indicates whether the ICMP message is v4 or v6. +- Log postprocessor scripts get an additional argument indicating the + type of the log writer in use (e.g., "ascii"). + +- BroControl's make-archive-name scripts also receives the writer + type, but as it's 2nd(!) argument. If you're using a custom version + of that script, you need to adapt it. See the shipped version for + details. TODO: Extend. diff --git a/VERSION b/VERSION index ad5d9a37c3..9dbed189a5 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.0-301 +2.0-372 diff --git a/aux/binpac b/aux/binpac index 71c37019bc..b4094cb75e 160000 --- a/aux/binpac +++ b/aux/binpac @@ -1 +1 @@ -Subproject commit 71c37019bc371eb7863fb6aa47a7daa4540f4f1f +Subproject commit b4094cb75e0a7769123f7db1f5d73f3f9f1c3977 diff --git a/aux/bro-aux b/aux/bro-aux index d885987e79..2038e3de04 160000 --- a/aux/bro-aux +++ b/aux/bro-aux @@ -1 +1 @@ -Subproject commit d885987e7968669e34504b0403ac89bd13928e9a +Subproject commit 2038e3de042115c3caa706426e16c830c1fd1e9e diff --git a/aux/broccoli b/aux/broccoli index bead1168ae..95c93494d7 160000 --- a/aux/broccoli +++ b/aux/broccoli @@ -1 +1 @@ -Subproject commit bead1168ae9c2d2ae216dd58522fbc05498ff2c8 +Subproject commit 95c93494d7192f69d30f208c4caa3bd38adda6fd diff --git a/aux/broctl b/aux/broctl index 44cc3de5f6..ba9e1aa2f2 160000 --- a/aux/broctl +++ b/aux/broctl @@ -1 +1 @@ -Subproject commit 44cc3de5f6f98a86b2516bdc48dd168e6a6a28fd +Subproject commit ba9e1aa2f2159deac0cf96863f54405643764df0 diff --git a/aux/btest b/aux/btest index 1897d224ce..e0da8d0e28 160000 --- a/aux/btest +++ b/aux/btest @@ -1 +1 @@ -Subproject commit 1897d224ce295e91d20e458851759c99734a0a74 +Subproject commit e0da8d0e284bbebbaef711c91c1b961580f225d2 diff --git a/cmake b/cmake index 49278736c1..96f3d92aca 160000 --- a/cmake +++ b/cmake @@ -1 +1 @@ -Subproject commit 49278736c1404cb8c077272b80312c947e68bf52 +Subproject commit 96f3d92acadbe1ae64f410e974c5ff503903394b diff --git a/config.h.in b/config.h.in index b8e2cb9a88..c2cb3ec1dc 100644 --- a/config.h.in +++ b/config.h.in @@ -114,6 +114,9 @@ /* Analyze Mobile IPv6 traffic */ #cmakedefine ENABLE_MOBILE_IPV6 +/* Use the DataSeries writer. */ +#cmakedefine USE_DATASERIES + /* Version number of package */ #define VERSION "@VERSION@" diff --git a/configure b/configure index 3c1cca8c9d..3258d4abfc 100755 --- a/configure +++ b/configure @@ -24,6 +24,7 @@ Usage: $0 [OPTION]... [VAR=VALUE]... --prefix=PREFIX installation directory [/usr/local/bro] --scriptdir=PATH root installation directory for Bro scripts [PREFIX/share/bro] + --conf-files-dir=PATH config files installation directory [PREFIX/etc] Optional Features: --enable-debug compile in debugging mode @@ -31,7 +32,7 @@ Usage: $0 [OPTION]... [VAR=VALUE]... 
--enable-perftools-debug use Google's perftools for debugging --disable-broccoli don't build or install the Broccoli library --disable-broctl don't install Broctl - --disable-auxtools don't build or install auxilliary tools + --disable-auxtools don't build or install auxiliary tools --disable-python don't try to build python bindings for broccoli --disable-ruby don't try to build ruby bindings for broccoli @@ -55,6 +56,8 @@ Usage: $0 [OPTION]... [VAR=VALUE]... --with-ruby-lib=PATH path to ruby library --with-ruby-inc=PATH path to ruby headers --with-swig=PATH path to SWIG executable + --with-dataseries=PATH path to DataSeries and Lintel libraries + --with-xml2=PATH path to libxml2 installation (for DataSeries) Packaging Options (for developers): --binary-package toggle special logic for binary packaging @@ -86,13 +89,15 @@ append_cache_entry () { # set defaults builddir=build +prefix=/usr/local/bro CMakeCacheEntries="" -append_cache_entry CMAKE_INSTALL_PREFIX PATH /usr/local/bro -append_cache_entry BRO_ROOT_DIR PATH /usr/local/bro -append_cache_entry PY_MOD_INSTALL_DIR PATH /usr/local/bro/lib/broctl -append_cache_entry BRO_SCRIPT_INSTALL_PATH STRING /usr/local/bro/share/bro +append_cache_entry CMAKE_INSTALL_PREFIX PATH $prefix +append_cache_entry BRO_ROOT_DIR PATH $prefix +append_cache_entry PY_MOD_INSTALL_DIR PATH $prefix/lib/broctl +append_cache_entry BRO_SCRIPT_INSTALL_PATH STRING $prefix/share/bro +append_cache_entry BRO_ETC_INSTALL_DIR PATH $prefix/etc append_cache_entry ENABLE_DEBUG BOOL false -append_cache_entry ENABLE_PERFTOOLS_DEBUG BOOL false +append_cache_entry ENABLE_PERFTOOLS_DEBUG BOOL false append_cache_entry BinPAC_SKIP_INSTALL BOOL true append_cache_entry BUILD_SHARED_LIBS BOOL true append_cache_entry INSTALL_AUX_TOOLS BOOL true @@ -120,17 +125,19 @@ while [ $# -ne 0 ]; do CMakeGenerator="$optarg" ;; --prefix=*) + prefix=$optarg append_cache_entry CMAKE_INSTALL_PREFIX PATH $optarg append_cache_entry BRO_ROOT_DIR PATH $optarg append_cache_entry PY_MOD_INSTALL_DIR PATH $optarg/lib/broctl - if [ "$user_set_scriptdir" != "true" ]; then - append_cache_entry BRO_SCRIPT_INSTALL_PATH STRING $optarg/share/bro - fi ;; --scriptdir=*) append_cache_entry BRO_SCRIPT_INSTALL_PATH STRING $optarg user_set_scriptdir="true" ;; + --conf-files-dir=*) + append_cache_entry BRO_ETC_INSTALL_DIR PATH $optarg + user_set_conffilesdir="true" + ;; --enable-debug) append_cache_entry ENABLE_DEBUG BOOL true ;; @@ -208,6 +215,13 @@ while [ $# -ne 0 ]; do --with-swig=*) append_cache_entry SWIG_EXECUTABLE PATH $optarg ;; + --with-dataseries=*) + append_cache_entry DataSeries_ROOT_DIR PATH $optarg + append_cache_entry Lintel_ROOT_DIR PATH $optarg + ;; + --with-xml2=*) + append_cache_entry LibXML2_ROOT_DIR PATH $optarg + ;; --binary-package) append_cache_entry BINARY_PACKAGING_MODE BOOL true ;; @@ -231,6 +245,14 @@ while [ $# -ne 0 ]; do shift done +if [ "$user_set_scriptdir" != "true" ]; then + append_cache_entry BRO_SCRIPT_INSTALL_PATH STRING $prefix/share/bro +fi + +if [ "$user_set_conffilesdir" != "true" ]; then + append_cache_entry BRO_ETC_INSTALL_DIR PATH $prefix/etc +fi + if [ -d $builddir ]; then # If build directory exists, check if it has a CMake cache if [ -f $builddir/CMakeCache.txt ]; then diff --git a/doc/logging-dataseries.rst b/doc/logging-dataseries.rst new file mode 100644 index 0000000000..b41b9fb0b7 --- /dev/null +++ b/doc/logging-dataseries.rst @@ -0,0 +1,168 @@ + +============================= +Binary Output with DataSeries +============================= + +.. 
rst-class:: opening + + Bro's default ASCII log format is not exactly the most efficient + way for storing large volumes of data. As an alternative, Bro comes + with experimental support for `DataSeries + `_ + output, an efficient binary format for recording structured bulk + data. DataSeries is developed and maintained at HP Labs. + +.. contents:: + +Installing DataSeries +--------------------- + +To use DataSeries, its libraries must be available at compile-time, +along with the supporting *Lintel* package. Generally, both are +distributed on `HP Labs' web site +`_. Currently, however, you need +to use recent development versions of both packages, which you can +download from github like this:: + + git clone http://github.com/dataseries/Lintel + git clone http://github.com/dataseries/DataSeries + +To build and install the two into ````, do:: + + ( cd Lintel && mkdir build && cd build && cmake -DCMAKE_INSTALL_PREFIX= .. && make && make install ) + ( cd DataSeries && mkdir build && cd build && cmake -DCMAKE_INSTALL_PREFIX= .. && make && make install ) + +Please refer to the packages' documentation for more information about +the installation process. In particular, there's more information on +required and optional `dependencies for Lintel +`_ +and `dependencies for DataSeries +`_. + +Compiling Bro with DataSeries Support +------------------------------------- + +Once you have installed DataSeries, Bro's ``configure`` should pick it +up automatically as long as it finds it in a standard system location. +Alternatively, you can specify the DataSeries installation prefix +manually with ``--with-dataseries=``. Keep an eye on +``configure``'s summary output; if it looks like the following, Bro +found DataSeries and will compile in the support:: + + # ./configure --with-dataseries=/usr/local + [...] + ====================| Bro Build Summary |===================== + [...] + DataSeries: true + [...] + ================================================================ + +Activating DataSeries +--------------------- + +The direct way to use DataSeries is to switch *all* log files over to +the binary format. To do that, just add ``redef +Log::default_writer=Log::WRITER_DATASERIES;`` to your ``local.bro``. +For testing, you can also just pass that on the command line:: + + bro -r trace.pcap Log::default_writer=Log::WRITER_DATASERIES + +With that, Bro will now write all its output into DataSeries files +``*.ds``. You can inspect these using DataSeries's set of command line +tools, which its installation process installs into ``/bin``. +For example, to convert a file back into an ASCII representation:: + + $ ds2txt conn.log + [... We skip a bunch of meta data here ...]
+ ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes + 1300475167.096535 CRCC5OdDlXe 141.142.220.202 5353 224.0.0.251 5353 udp dns 0.000000 0 0 S0 F 0 D 1 73 0 0 + 1300475167.097012 o7XBsfvo3U1 fe80::217:f2ff:fed7:cf65 5353 ff02::fb 5353 udp 0.000000 0 0 S0 F 0 D 1 199 0 0 + 1300475167.099816 pXPi1kPMgxb 141.142.220.50 5353 224.0.0.251 5353 udp 0.000000 0 0 S0 F 0 D 1 179 0 0 + 1300475168.853899 R7sOc16woCj 141.142.220.118 43927 141.142.2.2 53 udp dns 0.000435 38 89 SF F 0 Dd 1 66 1 117 + 1300475168.854378 Z6dfHVmt0X7 141.142.220.118 37676 141.142.2.2 53 udp dns 0.000420 52 99 SF F 0 Dd 1 80 1 127 + 1300475168.854837 k6T92WxgNAh 141.142.220.118 40526 141.142.2.2 53 udp dns 0.000392 38 183 SF F 0 Dd 1 66 1 211 + [...] + +(``--skip-all`` suppresses the meta data.) + +Note that the ASCII conversion is *not* equivalent to Bro's default +output format. + +You can also switch only individual files over to DataSeries by adding +code like this to your ``local.bro``:: + +.. code:: bro + + event bro_init() + { + local f = Log::get_filter(Conn::LOG, "default"); # Get default filter for connection log. + f$writer = Log::WRITER_DATASERIES; # Change writer type. + Log::add_filter(Conn::LOG, f); # Replace filter with adapted version. + } + +Bro's DataSeries writer comes with a few tuning options, see +:doc:`scripts/base/frameworks/logging/writers/dataseries`. + +Working with DataSeries +======================= + +Here are few examples of using DataSeries command line tools to work +with the output files. + +* Printing CSV:: + + $ ds2txt --csv conn.log + ts,uid,id.orig_h,id.orig_p,id.resp_h,id.resp_p,proto,service,duration,orig_bytes,resp_bytes,conn_state,local_orig,missed_bytes,history,orig_pkts,orig_ip_bytes,resp_pkts,resp_ip_bytes + 1258790493.773208,ZTtgbHvf4s3,192.168.1.104,137,192.168.1.255,137,udp,dns,3.748891,350,0,S0,F,0,D,7,546,0,0 + 1258790451.402091,pOY6Rw7lhUd,192.168.1.106,138,192.168.1.255,138,udp,,0.000000,0,0,S0,F,0,D,1,229,0,0 + 1258790493.787448,pn5IiEslca9,192.168.1.104,138,192.168.1.255,138,udp,,2.243339,348,0,S0,F,0,D,2,404,0,0 + 1258790615.268111,D9slyIu3hFj,192.168.1.106,137,192.168.1.255,137,udp,dns,3.764626,350,0,S0,F,0,D,7,546,0,0 + [...] + + Add ``--separator=X`` to set a different separator. + +* Extracting a subset of columns:: + + $ ds2txt --select '*' ts,id.resp_h,id.resp_p --skip-all conn.log + 1258790493.773208 192.168.1.255 137 + 1258790451.402091 192.168.1.255 138 + 1258790493.787448 192.168.1.255 138 + 1258790615.268111 192.168.1.255 137 + 1258790615.289842 192.168.1.255 138 + [...] + +* Filtering rows:: + + $ ds2txt --where '*' 'duration > 5 && id.resp_p > 1024' --skip-all conn.ds + 1258790631.532888 V8mV5WLITu5 192.168.1.105 55890 239.255.255.250 1900 udp 15.004568 798 0 S0 F 0 D 6 966 0 0 + 1258792413.439596 tMcWVWQptvd 192.168.1.105 55890 239.255.255.250 1900 udp 15.004581 798 0 S0 F 0 D 6 966 0 0 + 1258794195.346127 cQwQMRdBrKa 192.168.1.105 55890 239.255.255.250 1900 udp 15.005071 798 0 S0 F 0 D 6 966 0 0 + 1258795977.253200 i8TEjhWd2W8 192.168.1.105 55890 239.255.255.250 1900 udp 15.004824 798 0 S0 F 0 D 6 966 0 0 + 1258797759.160217 MsLsBA8Ia49 192.168.1.105 55890 239.255.255.250 1900 udp 15.005078 798 0 S0 F 0 D 6 966 0 0 + 1258799541.068452 TsOxRWJRGwf 192.168.1.105 55890 239.255.255.250 1900 udp 15.004082 798 0 S0 F 0 D 6 966 0 0 + [...] 
+ +* Calculating some statistics: + + Mean/stdev/min/max over a column:: + + $ dsstatgroupby '*' basic duration from conn.ds + # Begin DSStatGroupByModule + # processed 2159 rows, where clause eliminated 0 rows + # count(*), mean(duration), stddev, min, max + 2159, 42.7938, 1858.34, 0, 86370 + [...] + + Quantiles of total connection volume:: + + > dsstatgroupby '*' quantile 'orig_bytes + resp_bytes' from conn.ds + [...] + 2159 data points, mean 24616 +- 343295 [0,1.26615e+07] + quantiles about every 216 data points: + 10%: 0, 124, 317, 348, 350, 350, 601, 798, 1469 + tails: 90%: 1469, 95%: 7302, 99%: 242629, 99.5%: 1226262 + [...] + +The ``man`` pages for these tools show further options, and their +``-h`` option gives some more information (either can unfortunately +be a bit cryptic). diff --git a/doc/logging.rst b/doc/logging.rst index 30a793df7d..384996c28a 100644 --- a/doc/logging.rst +++ b/doc/logging.rst @@ -373,3 +373,13 @@ record, care must be given to when and how long data is stored. Normally data saved to the connection record will remain there for the duration of the connection and from a practical perspective it's not uncommon to need to delete that data before the end of the connection. + +Other Writers +------------- + +Bro supports the following output formats other than ASCII: + +.. toctree:: + :maxdepth: 1 + + logging-dataseries diff --git a/doc/scripts/DocSourcesList.cmake b/doc/scripts/DocSourcesList.cmake index a35d6894d1..521113ddd2 100644 --- a/doc/scripts/DocSourcesList.cmake +++ b/doc/scripts/DocSourcesList.cmake @@ -36,6 +36,7 @@ rest_target(${psd} base/frameworks/logging/main.bro) rest_target(${psd} base/frameworks/logging/postprocessors/scp.bro) rest_target(${psd} base/frameworks/logging/postprocessors/sftp.bro) rest_target(${psd} base/frameworks/logging/writers/ascii.bro) +rest_target(${psd} base/frameworks/logging/writers/dataseries.bro) rest_target(${psd} base/frameworks/metrics/cluster.bro) rest_target(${psd} base/frameworks/metrics/main.bro) rest_target(${psd} base/frameworks/metrics/non-cluster.bro) diff --git a/scripts/base/frameworks/dpd/main.bro b/scripts/base/frameworks/dpd/main.bro index e8488c3ec1..9eb0b467f8 100644 --- a/scripts/base/frameworks/dpd/main.bro +++ b/scripts/base/frameworks/dpd/main.bro @@ -105,5 +105,8 @@ event protocol_violation(c: connection, atype: count, aid: count, reason: string) &priority=-5 { if ( c?$dpd ) + { Log::write(DPD::LOG, c$dpd); + delete c$dpd; + } } diff --git a/scripts/base/frameworks/logging/__load__.bro b/scripts/base/frameworks/logging/__load__.bro index 42b2d7c564..17e03e2ef7 100644 --- a/scripts/base/frameworks/logging/__load__.bro +++ b/scripts/base/frameworks/logging/__load__.bro @@ -1,3 +1,4 @@ @load ./main @load ./postprocessors @load ./writers/ascii +@load ./writers/dataseries diff --git a/scripts/base/frameworks/logging/main.bro b/scripts/base/frameworks/logging/main.bro index 2c36b3001e..bec5f31dc6 100644 --- a/scripts/base/frameworks/logging/main.bro +++ b/scripts/base/frameworks/logging/main.bro @@ -332,7 +332,7 @@ function __default_rotation_postprocessor(info: RotationInfo) : bool function default_path_func(id: ID, path: string, rec: any) : string { local id_str = fmt("%s", id); - + local parts = split1(id_str, /::/); if ( |parts| == 2 ) { @@ -340,7 +340,7 @@ function default_path_func(id: ID, path: string, rec: any) : string # or a filter path explicitly set by the user, so continue using it.
if ( path != "" ) return path; - + # Example: Notice::LOG -> "notice" if ( parts[2] == "LOG" ) { @@ -356,11 +356,11 @@ function default_path_func(id: ID, path: string, rec: any) : string output = cat(output, sub_bytes(module_parts[4],1,1), "_", sub_bytes(module_parts[4], 2, |module_parts[4]|)); return to_lower(output); } - + # Example: Notice::POLICY_LOG -> "notice_policy" if ( /_LOG$/ in parts[2] ) parts[2] = sub(parts[2], /_LOG$/, ""); - + return cat(to_lower(parts[1]),"_",to_lower(parts[2])); } else @@ -376,13 +376,16 @@ function run_rotation_postprocessor_cmd(info: RotationInfo, npath: string) : boo if ( pp_cmd == "" ) return T; + # Turn, e.g., Log::WRITER_ASCII into "ascii". + local writer = subst_string(to_lower(fmt("%s", info$writer)), "log::writer_", ""); + # The date format is hard-coded here to provide a standardized # script interface. - system(fmt("%s %s %s %s %s %d", + system(fmt("%s %s %s %s %s %d %s", pp_cmd, npath, info$path, strftime("%y-%m-%d_%H.%M.%S", info$open), strftime("%y-%m-%d_%H.%M.%S", info$close), - info$terminating)); + info$terminating, writer)); return T; } @@ -407,7 +410,7 @@ function add_filter(id: ID, filter: Filter) : bool # definition. if ( ! filter?$path_func ) filter$path_func = default_path_func; - + filters[id, filter$name] = filter; return __add_filter(id, filter); } diff --git a/scripts/base/frameworks/logging/writers/dataseries.bro b/scripts/base/frameworks/logging/writers/dataseries.bro new file mode 100644 index 0000000000..ccee500c3a --- /dev/null +++ b/scripts/base/frameworks/logging/writers/dataseries.bro @@ -0,0 +1,60 @@ +##! Interface for the DataSeries log writer. + +module LogDataSeries; + +export { + ## Compression to use with the DS output file. Options are: + ## + ## 'none' -- No compression. + ## 'lzf' -- LZF compression. Very quick, but leads to larger output files. + ## 'lzo' -- LZO compression. Very fast decompression times. + ## 'gz' -- GZIP compression. Slower than LZF, but also produces smaller output. + ## 'bz2' -- BZIP2 compression. Slower than GZIP, but also produces smaller output. + const compression = "lzo" &redef; + + ## The extent buffer size. + ## Larger values here lead to better compression and more efficient writes, but + ## also increase the lag between the time events are received and the time they + ## are actually written to disk. + const extent_size = 65536 &redef; + + ## Should we dump the XML schema we use for this DS file to disk? + ## If yes, the XML schema shares the name of the logfile, but has + ## an XML ending. + const dump_schema = F &redef; + + ## How many threads should DataSeries spawn to perform compression? + ## Note that this dictates the number of threads per log stream. If + ## you're using a lot of streams, you may want to keep this number + ## relatively small. + ## + ## Default value is 1, which will spawn one thread / stream. + ## + ## Maximum is 128, minimum is 1. + const num_threads = 1 &redef; + + ## Should time be stored as an integer or a double? + ## Storing time as a double leads to possible precision issues and + ## can (significantly) increase the size of the resulting DS log. + ## That said, timestamps stored in double form are consistent + ## with the rest of Bro, including the standard ASCII log. Hence, we + ## use them by default. + const use_integer_for_time = F &redef; +} + +# Default function to postprocess a rotated DataSeries log file. 
It moves the +# rotated file to a new name that includes a timestamp with the opening time, and +# then runs the writer's default postprocessor command on it. +function default_rotation_postprocessor_func(info: Log::RotationInfo) : bool + { + # Move file to name including both opening and closing time. + local dst = fmt("%s.%s.ds", info$path, + strftime(Log::default_rotation_date_format, info$open)); + + system(fmt("/bin/mv %s %s", info$fname, dst)); + + # Run default postprocessor. + return Log::run_rotation_postprocessor_cmd(info, dst); + } + +redef Log::default_rotation_postprocessors += { [Log::WRITER_DATASERIES] = default_rotation_postprocessor_func }; diff --git a/scripts/base/frameworks/notice/cluster.bro b/scripts/base/frameworks/notice/cluster.bro index 281901cf31..087c3ead51 100644 --- a/scripts/base/frameworks/notice/cluster.bro +++ b/scripts/base/frameworks/notice/cluster.bro @@ -23,7 +23,10 @@ redef Cluster::worker2manager_events += /Notice::cluster_notice/; @if ( Cluster::local_node_type() != Cluster::MANAGER ) # The notice policy is completely handled by the manager and shouldn't be # done by workers or proxies to save time for packet processing. -redef policy = {}; +event bro_init() &priority=-11 + { + Notice::policy = table(); + } event Notice::begin_suppression(n: Notice::Info) { diff --git a/scripts/base/init-bare.bro b/scripts/base/init-bare.bro index cc798ecdc5..060d36aad4 100644 --- a/scripts/base/init-bare.bro +++ b/scripts/base/init-bare.bro @@ -92,6 +92,7 @@ type icmp_conn: record { itype: count; ##< The ICMP type of the packet that triggered the instantiation of the record. icode: count; ##< The ICMP code of the packet that triggered the instantiation of the record. len: count; ##< The length of the ICMP payload of the packet that triggered the instantiation of the record. + hlim: count; ##< The encapsulating IP header's Hop Limit value. v6: bool; ##< True if it's an ICMPv6 packet. }; @@ -2347,6 +2348,11 @@ type bt_tracker_headers: table[string] of string; ## BPF filter the user has set via the -f command line options. Empty if none. const cmd_line_bpf_filter = "" &redef; +## The maximum number of open files to keep cached at a given time. +## If set to zero, this is automatically determined by inspecting +## the current/maximum limit on open files for the process. +const max_files_in_cache = 0 &redef; + ## Deprecated. const log_rotate_interval = 0 sec &redef; diff --git a/scripts/base/protocols/ftp/main.bro b/scripts/base/protocols/ftp/main.bro index e6c0131337..7c5bbaefdc 100644 --- a/scripts/base/protocols/ftp/main.bro +++ b/scripts/base/protocols/ftp/main.bro @@ -6,6 +6,7 @@ @load ./utils-commands @load base/utils/paths @load base/utils/numbers +@load base/utils/addrs module FTP; @@ -22,7 +23,7 @@ export { const default_capture_password = F &redef; ## User IDs that can be considered "anonymous". - const guest_ids = { "anonymous", "ftp", "guest" } &redef; + const guest_ids = { "anonymous", "ftp", "ftpuser", "guest" } &redef; type Info: record { ## Time when the command was sent. @@ -160,17 +161,16 @@ function ftp_message(s: Info) # or it's a deliberately logged command. if ( |s$tags| > 0 || (s?$cmdarg && s$cmdarg$cmd in logged_commands) ) { - if ( s?$password && to_lower(s$user) !in guest_ids ) + if ( s?$password && + ! 
s$capture_password && + to_lower(s$user) !in guest_ids ) + { s$password = ""; + } local arg = s$cmdarg$arg; if ( s$cmdarg$cmd in file_cmds ) - { - if ( is_v4_addr(s$id$resp_h) ) - arg = fmt("ftp://%s%s", s$id$resp_h, build_path_compressed(s$cwd, arg)); - else - arg = fmt("ftp://[%s]%s", s$id$resp_h, build_path_compressed(s$cwd, arg)); - } + arg = fmt("ftp://%s%s", addr_to_uri(s$id$resp_h), build_path_compressed(s$cwd, arg)); s$ts=s$cmdarg$ts; s$command=s$cmdarg$cmd; diff --git a/scripts/base/protocols/http/utils.bro b/scripts/base/protocols/http/utils.bro index 0f2666fade..a74a2fe696 100644 --- a/scripts/base/protocols/http/utils.bro +++ b/scripts/base/protocols/http/utils.bro @@ -1,6 +1,7 @@ ##! Utilities specific for HTTP processing. @load ./main +@load base/utils/addrs module HTTP; @@ -51,7 +52,7 @@ function extract_keys(data: string, kv_splitter: pattern): string_vec function build_url(rec: Info): string { local uri = rec?$uri ? rec$uri : "/"; - local host = rec?$host ? rec$host : fmt("%s", rec$id$resp_h); + local host = rec?$host ? rec$host : addr_to_uri(rec$id$resp_h); if ( rec$id$resp_p != 80/tcp ) host = fmt("%s:%s", host, rec$id$resp_p); return fmt("%s%s", host, uri); diff --git a/scripts/base/protocols/ssl/consts.bro b/scripts/base/protocols/ssl/consts.bro index ab130c4318..42989a4cb9 100644 --- a/scripts/base/protocols/ssl/consts.bro +++ b/scripts/base/protocols/ssl/consts.bro @@ -77,8 +77,12 @@ export { [12] = "srp", [13] = "signature_algorithms", [14] = "use_srtp", + [15] = "heartbeat", [35] = "SessionTicket TLS", + [40] = "extended_random", [13172] = "next_protocol_negotiation", + [13175] = "origin_bound_certificates", + [13180] = "encrypted_client_certificates", [65281] = "renegotiation_info" } &default=function(i: count):string { return fmt("unknown-%d", i); }; diff --git a/scripts/base/protocols/ssl/main.bro b/scripts/base/protocols/ssl/main.bro index 0b280a6bcf..b5f74d5122 100644 --- a/scripts/base/protocols/ssl/main.bro +++ b/scripts/base/protocols/ssl/main.bro @@ -24,6 +24,8 @@ export { session_id: string &log &optional; ## Subject of the X.509 certificate offered by the server. subject: string &log &optional; + ## Subject of the signer of the X.509 certificate offered by the server. + issuer_subject: string &log &optional; ## NotValidBefore field value from the server certificate. not_valid_before: time &log &optional; ## NotValidAfter field value from the serve certificate. @@ -146,6 +148,7 @@ event x509_certificate(c: connection, is_orig: bool, cert: X509, chain_idx: coun # Also save other certificate information about the primary cert. c$ssl$subject = cert$subject; + c$ssl$issuer_subject = cert$issuer; c$ssl$not_valid_before = cert$not_valid_before; c$ssl$not_valid_after = cert$not_valid_after; } diff --git a/scripts/base/utils/addrs.bro b/scripts/base/utils/addrs.bro index 415b9adfa9..08efd5281a 100644 --- a/scripts/base/utils/addrs.bro +++ b/scripts/base/utils/addrs.bro @@ -98,3 +98,18 @@ function find_ip_addresses(input: string): string_array } return output; } + +## Returns the string representation of an IP address suitable for inclusion +## in a URI. For IPv4, this does no special formatting, but for IPv6, the +## address is included in square brackets. +## +## a: the address to make suitable for URI inclusion. +## +## Returns: the string representation of *a* suitable for URI inclusion. 
+function addr_to_uri(a: addr): string + { + if ( is_v4_addr(a) ) + return fmt("%s", a); + else + return fmt("[%s]", a); + } diff --git a/scripts/base/utils/files.bro b/scripts/base/utils/files.bro index 8111245c24..ccd03df0e6 100644 --- a/scripts/base/utils/files.bro +++ b/scripts/base/utils/files.bro @@ -1,10 +1,11 @@ +@load ./addrs ## This function can be used to generate a consistent filename for when ## contents of a file, stream, or connection are being extracted to disk. function generate_extraction_filename(prefix: string, c: connection, suffix: string): string { - local conn_info = fmt("%s:%d-%s:%d", - c$id$orig_h, c$id$orig_p, c$id$resp_h, c$id$resp_p); + local conn_info = fmt("%s:%d-%s:%d", addr_to_uri(c$id$orig_h), c$id$orig_p, + addr_to_uri(c$id$resp_h), c$id$resp_p); if ( prefix != "" ) conn_info = fmt("%s_%s", prefix, conn_info); diff --git a/scripts/base/utils/site.bro b/scripts/base/utils/site.bro index 4aeb70fe3f..55ee0e5ed1 100644 --- a/scripts/base/utils/site.bro +++ b/scripts/base/utils/site.bro @@ -8,27 +8,31 @@ export { ## Address space that is considered private and unrouted. ## By default it has RFC defined non-routable IPv4 address space. const private_address_space: set[subnet] = { - 10.0.0.0/8, - 192.168.0.0/16, - 127.0.0.0/8, - 172.16.0.0/12 + 10.0.0.0/8, + 192.168.0.0/16, + 172.16.0.0/12, + 100.64.0.0/10, # RFC6598 Carrier Grade NAT + 127.0.0.0/8, + [fe80::]/10, + [::1]/128, } &redef; ## Networks that are considered "local". const local_nets: set[subnet] &redef; - - ## This is used for retrieving the subnet when you multiple - ## :bro:id:`Site::local_nets`. A membership query can be done with an - ## :bro:type:`addr` and the table will yield the subnet it was found + + ## This is used for retrieving the subnet when using multiple entries in + ## :bro:id:`Site::local_nets`. It's populated automatically from there. + ## A membership query can be done with an + ## :bro:type:`addr` and the table will yield the subnet it was found ## within. global local_nets_table: table[subnet] of subnet = {}; ## Networks that are considered "neighbors". const neighbor_nets: set[subnet] &redef; - + ## If local network administrators are known and they have responsibility ## for defined address space, then a mapping can be defined here between - ## networks for which they have responsibility and a set of email + ## networks for which they have responsibility and a set of email ## addresses. const local_admins: table[subnet] of set[string] = {} &redef; @@ -40,27 +44,33 @@ export { ## Function that returns true if an address corresponds to one of ## the local networks, false if not. + ## The function inspects :bro:id:`Site::local_nets`. global is_local_addr: function(a: addr): bool; - + ## Function that returns true if an address corresponds to one of ## the neighbor networks, false if not. + ## The function inspects :bro:id:`Site::neighbor_nets`. global is_neighbor_addr: function(a: addr): bool; - + ## Function that returns true if an address corresponds to one of ## the private/unrouted networks, false if not. + ## The function inspects :bro:id:`Site::private_address_space`. global is_private_addr: function(a: addr): bool; - ## Function that returns true if a host name is within a local + ## Function that returns true if a host name is within a local ## DNS zone. + ## The function inspects :bro:id:`Site::local_zones`. 
global is_local_name: function(name: string): bool; - - ## Function that returns true if a host name is within a neighbor + + ## Function that returns true if a host name is within a neighbor ## DNS zone. + ## The function inspects :bro:id:`Site::neighbor_zones`. global is_neighbor_name: function(name: string): bool; - + ## Function that returns a common separated list of email addresses ## that are considered administrators for the IP address provided as ## an argument. + ## The function inspects :bro:id:`Site::local_admins`. global get_emails: function(a: addr): string; } @@ -73,22 +83,22 @@ function is_local_addr(a: addr): bool { return a in local_nets; } - + function is_neighbor_addr(a: addr): bool { return a in neighbor_nets; } - + function is_private_addr(a: addr): bool { return a in private_address_space; } - + function is_local_name(name: string): bool { return local_dns_suffix_regex in name; } - + function is_neighbor_name(name: string): bool { return local_dns_neighbor_suffix_regex in name; @@ -96,7 +106,7 @@ function is_neighbor_name(name: string): bool # This is a hack for doing a for loop. const one_to_32: vector of count = {1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32}; - + # TODO: make this work with IPv6 function find_all_emails(ip: addr): set[string] { diff --git a/src/Analyzer.cc b/src/Analyzer.cc index f9570d707f..c72af2a44a 100644 --- a/src/Analyzer.cc +++ b/src/Analyzer.cc @@ -36,7 +36,7 @@ #include "POP3.h" #include "SOCKS.h" #include "SSH.h" -#include "SSL-binpac.h" +#include "SSL.h" #include "Syslog-binpac.h" #include "ConnSizeAnalyzer.h" @@ -123,8 +123,8 @@ const Analyzer::Config Analyzer::analyzer_configs[] = { HTTP_Analyzer_binpac::InstantiateAnalyzer, HTTP_Analyzer_binpac::Available, 0, false }, { AnalyzerTag::SSL, "SSL", - SSL_Analyzer_binpac::InstantiateAnalyzer, - SSL_Analyzer_binpac::Available, 0, false }, + SSL_Analyzer::InstantiateAnalyzer, + SSL_Analyzer::Available, 0, false }, { AnalyzerTag::SYSLOG_BINPAC, "SYSLOG_BINPAC", Syslog_Analyzer_binpac::InstantiateAnalyzer, Syslog_Analyzer_binpac::Available, 0, false }, diff --git a/src/Anon.cc b/src/Anon.cc index d2a28a0e08..f58057b2fc 100644 --- a/src/Anon.cc +++ b/src/Anon.cc @@ -5,7 +5,6 @@ #include "util.h" #include "net_util.h" -#include "md5.h" #include "Anon.h" #include "Val.h" #include "NetVar.h" diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index fca9a62c7c..50875cbcca 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -252,7 +252,6 @@ add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/DebugCmdConstants.h WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} ) -set(dns_SRCS nb_dns.c) set_source_files_properties(nb_dns.c PROPERTIES COMPILE_FLAGS -fno-strict-aliasing) @@ -383,7 +382,7 @@ set(bro_SRCS SMTP.cc SOCKS.cc SSH.cc - SSL-binpac.cc + SSL.cc Scope.cc SerializationFormat.cc SerialObj.cc @@ -411,7 +410,6 @@ set(bro_SRCS bsd-getopt-long.c bro_inet_ntop.c cq.c - md5.c patricia.c setsignal.c PacketDumper.cc @@ -427,10 +425,11 @@ set(bro_SRCS logging/WriterBackend.cc logging/WriterFrontend.cc logging/writers/Ascii.cc + logging/writers/DataSeries.cc logging/writers/None.cc - ${dns_SRCS} - ${openssl_SRCS} + nb_dns.c + digest.h ) collect_headers(bro_HEADERS ${bro_SRCS}) diff --git a/src/DFA.cc b/src/DFA.cc index e58ea260e5..06ccfd9342 100644 --- a/src/DFA.cc +++ b/src/DFA.cc @@ -2,9 +2,10 @@ #include "config.h" +#include + #include "EquivClass.h" #include "DFA.h" -#include "md5.h" int dfa_state_cache_size = 10000; @@ -312,8 +313,8 @@ DFA_State* 
DFA_State_Cache::Lookup(const NFA_state_list& nfas, { // We assume that state ID's don't exceed 10 digits, plus // we allow one more character for the delimiter. - md5_byte_t id_tag[nfas.length() * 11 + 1]; - md5_byte_t* p = id_tag; + u_char id_tag[nfas.length() * 11 + 1]; + u_char* p = id_tag; for ( int i = 0; i < nfas.length(); ++i ) { @@ -335,12 +336,8 @@ DFA_State* DFA_State_Cache::Lookup(const NFA_state_list& nfas, // We use the short MD5 instead of the full string for the // HashKey because the data is copied into the key. - md5_state_t state; - md5_byte_t digest[16]; - - md5_init(&state); - md5_append(&state, id_tag, p - id_tag); - md5_finish(&state, digest); + u_char digest[16]; + MD5(id_tag, p - id_tag, digest); *hash = new HashKey(&digest, sizeof(digest)); CacheEntry* e = states.Lookup(*hash); diff --git a/src/File.cc b/src/File.cc index d4e31bcc16..8b432f4428 100644 --- a/src/File.cc +++ b/src/File.cc @@ -74,9 +74,8 @@ void RotateTimer::Dispatch(double t, int is_expire) // The following could in principle be part of a "file manager" object. -#define MAX_FILE_CACHE_SIZE 32 +#define MAX_FILE_CACHE_SIZE 512 static int num_files_in_cache = 0; -static int max_files_in_cache = 0; static BroFile* head = 0; static BroFile* tail = 0; @@ -87,12 +86,9 @@ double BroFile::default_rotation_size = 0; // that we should use for the cache. static int maximize_num_fds() { -#ifdef NO_HAVE_SETRLIMIT - return MAX_FILE_CACHE_SIZE; -#else struct rlimit rl; if ( getrlimit(RLIMIT_NOFILE, &rl) < 0 ) - reporter->InternalError("maximize_num_fds(): getrlimit failed"); + reporter->FatalError("maximize_num_fds(): getrlimit failed"); if ( rl.rlim_max == RLIM_INFINITY ) { @@ -108,10 +104,9 @@ static int maximize_num_fds() rl.rlim_cur = rl.rlim_max; if ( setrlimit(RLIMIT_NOFILE, &rl) < 0 ) - reporter->InternalError("maximize_num_fds(): setrlimit failed"); + reporter->FatalError("maximize_num_fds(): setrlimit failed"); return rl.rlim_cur / 2; -#endif } @@ -172,7 +167,7 @@ const char* BroFile::Name() const return 0; } -bool BroFile::Open(FILE* file) +bool BroFile::Open(FILE* file, const char* mode) { open_time = network_time ? network_time : current_time(); @@ -196,7 +191,12 @@ bool BroFile::Open(FILE* file) InstallRotateTimer(); if ( ! f ) - f = fopen(name, access); + { + if ( ! mode ) + f = fopen(name, access); + else + f = fopen(name, mode); + } SetBuf(buffered); @@ -846,8 +846,8 @@ BroFile* BroFile::Unserialize(UnserialInfo* info) } } - // Otherwise, open. - if ( ! file->Open() ) + // Otherwise, open, but don't clobber. + if ( ! file->Open(0, "a") ) { info->s->Error(fmt("cannot open %s: %s", file->name, strerror(errno))); diff --git a/src/File.h b/src/File.h index 444d6209e2..37f844867b 100644 --- a/src/File.h +++ b/src/File.h @@ -87,7 +87,13 @@ protected: BroFile() { Init(); } void Init(); - bool Open(FILE* f = 0); // if file is given, it's an open file to use + + /** + * If file is given, it's an open file to use already. + * If file is not given and mode is, the filename will be opened with that + * access mode. 
+ */ + bool Open(FILE* f = 0, const char* mode = 0); BroFile* Prev() { return prev; } BroFile* Next() { return next; } diff --git a/src/Func.cc b/src/Func.cc index 65cb22b09d..ecb341e3e0 100644 --- a/src/Func.cc +++ b/src/Func.cc @@ -29,7 +29,6 @@ #include -#include "md5.h" #include "Base64.h" #include "Stmt.h" #include "Scope.h" diff --git a/src/ICMP.cc b/src/ICMP.cc index 5e1eeb66e4..05a6b67dff 100644 --- a/src/ICMP.cc +++ b/src/ICMP.cc @@ -131,7 +131,7 @@ void ICMP_Analyzer::NextICMP4(double t, const struct icmp* icmpp, int len, int c break; default: - ICMPEvent(icmp_sent, icmpp, len, 0); + ICMPEvent(icmp_sent, icmpp, len, 0, ip_hdr); break; } } @@ -181,23 +181,31 @@ void ICMP_Analyzer::NextICMP6(double t, const struct icmp* icmpp, int len, int c case MLD_LISTENER_REDUCTION: #endif default: - ICMPEvent(icmp_sent, icmpp, len, 1); + // Error messages (i.e., ICMPv6 type < 128) all have + // the same structure for their context, and are + // handled by the same function. + if ( icmpp->icmp_type < 128 ) + Context6(t, icmpp, len, caplen, data, ip_hdr); + else + ICMPEvent(icmp_sent, icmpp, len, 1, ip_hdr); break; } } -void ICMP_Analyzer::ICMPEvent(EventHandlerPtr f, const struct icmp* icmpp, int len, int icmpv6) +void ICMP_Analyzer::ICMPEvent(EventHandlerPtr f, const struct icmp* icmpp, + int len, int icmpv6, const IP_Hdr* ip_hdr) { if ( ! f ) return; val_list* vl = new val_list; vl->append(BuildConnVal()); - vl->append(BuildICMPVal(icmpp, len, icmpv6)); + vl->append(BuildICMPVal(icmpp, len, icmpv6, ip_hdr)); ConnectionEvent(f, vl); } -RecordVal* ICMP_Analyzer::BuildICMPVal(const struct icmp* icmpp, int len, int icmpv6) +RecordVal* ICMP_Analyzer::BuildICMPVal(const struct icmp* icmpp, int len, + int icmpv6, const IP_Hdr* ip_hdr) { if ( ! icmp_conn_val ) { @@ -208,7 +216,8 @@ RecordVal* ICMP_Analyzer::BuildICMPVal(const struct icmp* icmpp, int len, int ic icmp_conn_val->Assign(2, new Val(icmpp->icmp_type, TYPE_COUNT)); icmp_conn_val->Assign(3, new Val(icmpp->icmp_code, TYPE_COUNT)); icmp_conn_val->Assign(4, new Val(len, TYPE_COUNT)); - icmp_conn_val->Assign(5, new Val(icmpv6, TYPE_BOOL)); + icmp_conn_val->Assign(5, new Val(ip_hdr->TTL(), TYPE_COUNT)); + icmp_conn_val->Assign(6, new Val(icmpv6, TYPE_BOOL)); } Ref(icmp_conn_val); @@ -494,7 +503,7 @@ void ICMP_Analyzer::Echo(double t, const struct icmp* icmpp, int len, val_list* vl = new val_list; vl->append(BuildConnVal()); - vl->append(BuildICMPVal(icmpp, len, ip_hdr->NextProto() != IPPROTO_ICMP)); + vl->append(BuildICMPVal(icmpp, len, ip_hdr->NextProto() != IPPROTO_ICMP, ip_hdr)); vl->append(new Val(iid, TYPE_COUNT)); vl->append(new Val(iseq, TYPE_COUNT)); vl->append(new StringVal(payload)); @@ -504,7 +513,7 @@ void ICMP_Analyzer::Echo(double t, const struct icmp* icmpp, int len, void ICMP_Analyzer::RouterAdvert(double t, const struct icmp* icmpp, int len, - int caplen, const u_char*& data, const IP_Hdr* /*ip_hdr*/) + int caplen, const u_char*& data, const IP_Hdr* ip_hdr) { EventHandlerPtr f = icmp_router_advertisement; uint32 reachable, retrans; @@ -514,19 +523,24 @@ void ICMP_Analyzer::RouterAdvert(double t, const struct icmp* icmpp, int len, val_list* vl = new val_list; vl->append(BuildConnVal()); - vl->append(BuildICMPVal(icmpp, len, 1)); - vl->append(new Val(icmpp->icmp_num_addrs, TYPE_COUNT)); - vl->append(new Val(icmpp->icmp_wpa & 0x80, TYPE_BOOL)); - vl->append(new Val(htons(icmpp->icmp_lifetime), TYPE_COUNT)); - vl->append(new Val(reachable, TYPE_INTERVAL)); - vl->append(new Val(retrans, TYPE_INTERVAL)); + 
vl->append(BuildICMPVal(icmpp, len, 1, ip_hdr)); + vl->append(new Val(icmpp->icmp_num_addrs, TYPE_COUNT)); // Cur Hop Limit + vl->append(new Val(icmpp->icmp_wpa & 0x80, TYPE_BOOL)); // Managed + vl->append(new Val(icmpp->icmp_wpa & 0x40, TYPE_BOOL)); // Other + vl->append(new Val(icmpp->icmp_wpa & 0x20, TYPE_BOOL)); // Home Agent + vl->append(new Val((icmpp->icmp_wpa & 0x18)>>3, TYPE_COUNT)); // Pref + vl->append(new Val(icmpp->icmp_wpa & 0x04, TYPE_BOOL)); // Proxy + vl->append(new Val(icmpp->icmp_wpa & 0x02, TYPE_COUNT)); // Reserved + vl->append(new IntervalVal((double)ntohs(icmpp->icmp_lifetime), Seconds)); + vl->append(new IntervalVal((double)ntohl(reachable), Milliseconds)); + vl->append(new IntervalVal((double)ntohl(retrans), Milliseconds)); ConnectionEvent(f, vl); } void ICMP_Analyzer::NeighborAdvert(double t, const struct icmp* icmpp, int len, - int caplen, const u_char*& data, const IP_Hdr* /*ip_hdr*/) + int caplen, const u_char*& data, const IP_Hdr* ip_hdr) { EventHandlerPtr f = icmp_neighbor_advertisement; in6_addr tgtaddr; @@ -535,7 +549,10 @@ void ICMP_Analyzer::NeighborAdvert(double t, const struct icmp* icmpp, int len, val_list* vl = new val_list; vl->append(BuildConnVal()); - vl->append(BuildICMPVal(icmpp, len, 1)); + vl->append(BuildICMPVal(icmpp, len, 1, ip_hdr)); + vl->append(new Val(icmpp->icmp_num_addrs & 0x80, TYPE_BOOL)); // Router + vl->append(new Val(icmpp->icmp_num_addrs & 0x40, TYPE_BOOL)); // Solicited + vl->append(new Val(icmpp->icmp_num_addrs & 0x20, TYPE_BOOL)); // Override vl->append(new AddrVal(IPAddr(tgtaddr))); ConnectionEvent(f, vl); @@ -543,7 +560,7 @@ void ICMP_Analyzer::NeighborAdvert(double t, const struct icmp* icmpp, int len, void ICMP_Analyzer::NeighborSolicit(double t, const struct icmp* icmpp, int len, - int caplen, const u_char*& data, const IP_Hdr* /*ip_hdr*/) + int caplen, const u_char*& data, const IP_Hdr* ip_hdr) { EventHandlerPtr f = icmp_neighbor_solicitation; in6_addr tgtaddr; @@ -552,7 +569,7 @@ void ICMP_Analyzer::NeighborSolicit(double t, const struct icmp* icmpp, int len, val_list* vl = new val_list; vl->append(BuildConnVal()); - vl->append(BuildICMPVal(icmpp, len, 1)); + vl->append(BuildICMPVal(icmpp, len, 1, ip_hdr)); vl->append(new AddrVal(IPAddr(tgtaddr))); ConnectionEvent(f, vl); @@ -560,7 +577,7 @@ void ICMP_Analyzer::NeighborSolicit(double t, const struct icmp* icmpp, int len, void ICMP_Analyzer::Redirect(double t, const struct icmp* icmpp, int len, - int caplen, const u_char*& data, const IP_Hdr* /*ip_hdr*/) + int caplen, const u_char*& data, const IP_Hdr* ip_hdr) { EventHandlerPtr f = icmp_redirect; in6_addr tgtaddr, dstaddr; @@ -570,7 +587,7 @@ void ICMP_Analyzer::Redirect(double t, const struct icmp* icmpp, int len, val_list* vl = new val_list; vl->append(BuildConnVal()); - vl->append(BuildICMPVal(icmpp, len, 1)); + vl->append(BuildICMPVal(icmpp, len, 1, ip_hdr)); vl->append(new AddrVal(IPAddr(tgtaddr))); vl->append(new AddrVal(IPAddr(dstaddr))); @@ -579,7 +596,7 @@ void ICMP_Analyzer::Redirect(double t, const struct icmp* icmpp, int len, void ICMP_Analyzer::Router(double t, const struct icmp* icmpp, int len, - int caplen, const u_char*& data, const IP_Hdr* /*ip_hdr*/) + int caplen, const u_char*& data, const IP_Hdr* ip_hdr) { EventHandlerPtr f = 0; @@ -590,13 +607,13 @@ void ICMP_Analyzer::Router(double t, const struct icmp* icmpp, int len, break; case ICMP6_ROUTER_RENUMBERING: default: - ICMPEvent(icmp_sent, icmpp, len, 1); + ICMPEvent(icmp_sent, icmpp, len, 1, ip_hdr); return; } val_list* vl = new val_list; 
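For reference, the router advertisement decoding above pulls the individual flag bits (RFC 4861/3775/4191/4389) out of the single flags byte carried in icmp_wpa; a hedged sketch of the same bit layout, with illustrative local names only:

    unsigned char flags = icmpp->icmp_wpa;          // RA flags byte
    bool managed     = flags & 0x80;                // M: managed address config
    bool other       = flags & 0x40;                // O: other stateful config
    bool home_agent  = flags & 0x20;                // H: Mobile IPv6 home agent
    unsigned int prf = (flags & 0x18) >> 3;         // Prf: router preference
    bool proxy       = flags & 0x04;                // P: ND proxy
    unsigned int rsv = flags & 0x02;                // reserved bits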
vl->append(BuildConnVal()); - vl->append(BuildICMPVal(icmpp, len, 1)); + vl->append(BuildICMPVal(icmpp, len, 1, ip_hdr)); ConnectionEvent(f, vl); } @@ -622,7 +639,7 @@ void ICMP_Analyzer::Context4(double t, const struct icmp* icmpp, { val_list* vl = new val_list; vl->append(BuildConnVal()); - vl->append(BuildICMPVal(icmpp, len, 0)); + vl->append(BuildICMPVal(icmpp, len, 0, ip_hdr)); vl->append(new Val(icmpp->icmp_code, TYPE_COUNT)); vl->append(ExtractICMP4Context(caplen, data)); ConnectionEvent(f, vl); @@ -652,13 +669,17 @@ void ICMP_Analyzer::Context6(double t, const struct icmp* icmpp, case ICMP6_PACKET_TOO_BIG: f = icmp_packet_too_big; break; + + default: + f = icmp_error_message; + break; } if ( f ) { val_list* vl = new val_list; vl->append(BuildConnVal()); - vl->append(BuildICMPVal(icmpp, len, 1)); + vl->append(BuildICMPVal(icmpp, len, 1, ip_hdr)); vl->append(new Val(icmpp->icmp_code, TYPE_COUNT)); vl->append(ExtractICMP6Context(caplen, data)); ConnectionEvent(f, vl); diff --git a/src/ICMP.h b/src/ICMP.h index 59a399f74f..33773b9762 100644 --- a/src/ICMP.h +++ b/src/ICMP.h @@ -33,7 +33,8 @@ protected: virtual bool IsReuse(double t, const u_char* pkt); virtual unsigned int MemoryAllocation() const; - void ICMPEvent(EventHandlerPtr f, const struct icmp* icmpp, int len, int icmpv6); + void ICMPEvent(EventHandlerPtr f, const struct icmp* icmpp, int len, + int icmpv6, const IP_Hdr* ip_hdr); void Echo(double t, const struct icmp* icmpp, int len, int caplen, const u_char*& data, const IP_Hdr* ip_hdr); @@ -52,7 +53,8 @@ protected: void Describe(ODesc* d) const; - RecordVal* BuildICMPVal(const struct icmp* icmpp, int len, int icmpv6); + RecordVal* BuildICMPVal(const struct icmp* icmpp, int len, int icmpv6, + const IP_Hdr* ip_hdr); void NextICMP4(double t, const struct icmp* icmpp, int len, int caplen, const u_char*& data, const IP_Hdr* ip_hdr ); diff --git a/src/MIME.cc b/src/MIME.cc index 103cf149ef..4a7c0268b0 100644 --- a/src/MIME.cc +++ b/src/MIME.cc @@ -4,6 +4,7 @@ #include "MIME.h" #include "Event.h" #include "Reporter.h" +#include "digest.h" // Here are a few things to do: // @@ -1008,7 +1009,7 @@ void MIME_Mail::Done() if ( compute_content_hash && mime_content_hash ) { u_char* digest = new u_char[16]; - md5_finish(&md5_hash, digest); + md5_final(&md5_hash, digest); val_list* vl = new val_list; vl->append(analyzer->BuildConnVal()); @@ -1096,7 +1097,7 @@ void MIME_Mail::SubmitData(int len, const char* buf) if ( compute_content_hash ) { content_hash_length += len; - md5_append(&md5_hash, (const u_char*) buf, len); + md5_update(&md5_hash, (const u_char*) buf, len); } if ( mime_entity_data || mime_all_data ) diff --git a/src/MIME.h b/src/MIME.h index 52d943fb15..ffff30e387 100644 --- a/src/MIME.h +++ b/src/MIME.h @@ -2,13 +2,12 @@ #define mime_h #include - +#include #include #include #include using namespace std; -#include "md5.h" #include "Base64.h" #include "BroString.h" #include "Analyzer.h" @@ -248,7 +247,7 @@ protected: int buffer_offset; int compute_content_hash; int content_hash_length; - md5_state_t md5_hash; + MD5_CTX md5_hash; vector entity_content; vector all_content; diff --git a/src/NetVar.cc b/src/NetVar.cc index 390598bb99..cf88794695 100644 --- a/src/NetVar.cc +++ b/src/NetVar.cc @@ -164,6 +164,7 @@ TableVal* preserve_orig_addr; TableVal* preserve_resp_addr; TableVal* preserve_other_addr; +int max_files_in_cache; double log_rotate_interval; double log_max_size; RecordType* rotate_info; @@ -254,6 +255,7 @@ void init_general_global_var() state_dir = 
internal_val("state_dir")->AsStringVal(); state_write_delay = opt_internal_double("state_write_delay"); + max_files_in_cache = opt_internal_int("max_files_in_cache"); log_rotate_interval = opt_internal_double("log_rotate_interval"); log_max_size = opt_internal_double("log_max_size"); rotate_info = internal_type("rotate_info")->AsRecordType(); diff --git a/src/NetVar.h b/src/NetVar.h index f5c17f64aa..7c40eca1fa 100644 --- a/src/NetVar.h +++ b/src/NetVar.h @@ -167,6 +167,7 @@ extern double connection_status_update_interval; extern StringVal* state_dir; extern double state_write_delay; +extern int max_files_in_cache; extern double log_rotate_interval; extern double log_max_size; extern RecordType* rotate_info; diff --git a/src/Reporter.cc b/src/Reporter.cc index 37470cd690..18f39ce4af 100644 --- a/src/Reporter.cc +++ b/src/Reporter.cc @@ -149,7 +149,7 @@ void Reporter::WeirdHelper(EventHandlerPtr event, Val* conn_val, const char* add va_list ap; va_start(ap, fmt_name); - DoLog("weird", event, stderr, 0, vl, false, false, 0, fmt_name, ap); + DoLog("weird", event, 0, 0, vl, false, false, 0, fmt_name, ap); va_end(ap); delete vl; @@ -163,7 +163,7 @@ void Reporter::WeirdFlowHelper(const IPAddr& orig, const IPAddr& resp, const cha va_list ap; va_start(ap, fmt_name); - DoLog("weird", flow_weird, stderr, 0, vl, false, false, 0, fmt_name, ap); + DoLog("weird", flow_weird, 0, 0, vl, false, false, 0, fmt_name, ap); va_end(ap); delete vl; @@ -326,7 +326,8 @@ void Reporter::DoLog(const char* prefix, EventHandlerPtr event, FILE* out, Conne s += buffer; s += "\n"; - fprintf(out, "%s", s.c_str()); + if ( out ) + fprintf(out, "%s", s.c_str()); if ( addl ) { diff --git a/src/SSL-binpac.cc b/src/SSL.cc similarity index 66% rename from src/SSL-binpac.cc rename to src/SSL.cc index db9a7004d6..218b17080b 100644 --- a/src/SSL-binpac.cc +++ b/src/SSL.cc @@ -1,21 +1,21 @@ -#include "SSL-binpac.h" +#include "SSL.h" #include "TCP_Reassembler.h" #include "Reporter.h" #include "util.h" -SSL_Analyzer_binpac::SSL_Analyzer_binpac(Connection* c) +SSL_Analyzer::SSL_Analyzer(Connection* c) : TCP_ApplicationAnalyzer(AnalyzerTag::SSL, c) { interp = new binpac::SSL::SSL_Conn(this); had_gap = false; } -SSL_Analyzer_binpac::~SSL_Analyzer_binpac() +SSL_Analyzer::~SSL_Analyzer() { delete interp; } -void SSL_Analyzer_binpac::Done() +void SSL_Analyzer::Done() { TCP_ApplicationAnalyzer::Done(); @@ -23,23 +23,22 @@ void SSL_Analyzer_binpac::Done() interp->FlowEOF(false); } -void SSL_Analyzer_binpac::EndpointEOF(TCP_Reassembler* endp) +void SSL_Analyzer::EndpointEOF(TCP_Reassembler* endp) { TCP_ApplicationAnalyzer::EndpointEOF(endp); interp->FlowEOF(endp->IsOrig()); } -void SSL_Analyzer_binpac::DeliverStream(int len, const u_char* data, bool orig) +void SSL_Analyzer::DeliverStream(int len, const u_char* data, bool orig) { TCP_ApplicationAnalyzer::DeliverStream(len, data, orig); assert(TCP()); - if ( TCP()->IsPartial() ) return; if ( had_gap ) - // XXX: If only one side had a content gap, we could still try to + // If only one side had a content gap, we could still try to // deliver data to the other side if the script layer can handle this. 
return; @@ -53,7 +52,7 @@ void SSL_Analyzer_binpac::DeliverStream(int len, const u_char* data, bool orig) } } -void SSL_Analyzer_binpac::Undelivered(int seq, int len, bool orig) +void SSL_Analyzer::Undelivered(int seq, int len, bool orig) { TCP_ApplicationAnalyzer::Undelivered(seq, len, orig); had_gap = true; diff --git a/src/SSL-binpac.h b/src/SSL.h similarity index 74% rename from src/SSL-binpac.h rename to src/SSL.h index 8dab19d00c..c9f8d9be91 100644 --- a/src/SSL-binpac.h +++ b/src/SSL.h @@ -1,14 +1,13 @@ -#ifndef ssl_binpac_h -#define ssl_binpac_h +#ifndef ssl_h +#define ssl_h #include "TCP.h" - #include "ssl_pac.h" -class SSL_Analyzer_binpac : public TCP_ApplicationAnalyzer { +class SSL_Analyzer : public TCP_ApplicationAnalyzer { public: - SSL_Analyzer_binpac(Connection* conn); - virtual ~SSL_Analyzer_binpac(); + SSL_Analyzer(Connection* conn); + virtual ~SSL_Analyzer(); // Overriden from Analyzer. virtual void Done(); @@ -19,7 +18,7 @@ public: virtual void EndpointEOF(TCP_Reassembler* endp); static Analyzer* InstantiateAnalyzer(Connection* conn) - { return new SSL_Analyzer_binpac(conn); } + { return new SSL_Analyzer(conn); } static bool Available() { diff --git a/src/Type.cc b/src/Type.cc index 82221303af..d688b15376 100644 --- a/src/Type.cc +++ b/src/Type.cc @@ -15,10 +15,9 @@ extern int generate_documentation; +// Note: This function must be thread-safe. const char* type_name(TypeTag t) { - static char errbuf[512]; - static const char* type_names[int(NUM_TYPES)] = { "void", "bool", "int", "count", "counter", @@ -37,10 +36,7 @@ const char* type_name(TypeTag t) }; if ( int(t) >= NUM_TYPES ) - { - snprintf(errbuf, sizeof(errbuf), "%d: not a type tag", int(t)); - return errbuf; - } + return "type_name(): not a type tag"; return type_names[int(t)]; } diff --git a/src/bro.bif b/src/bro.bif index f76704cfe6..15740a83c7 100644 --- a/src/bro.bif +++ b/src/bro.bif @@ -6,13 +6,13 @@ %%{ // C segment #include - #include #include #include #include #include +#include "digest.h" #include "Reporter.h" #include "IPAddr.h" @@ -530,7 +530,7 @@ function piped_exec%(program: string, to_write: string%): bool %%{ static void hash_md5_val(val_list& vlist, unsigned char digest[16]) { - md5_state_s h; + MD5_CTX h; md5_init(&h); loop_over_list(vlist, i) @@ -539,16 +539,16 @@ static void hash_md5_val(val_list& vlist, unsigned char digest[16]) if ( v->Type()->Tag() == TYPE_STRING ) { const BroString* str = v->AsString(); - md5_append(&h, str->Bytes(), str->Len()); + md5_update(&h, str->Bytes(), str->Len()); } else { ODesc d(DESC_BINARY); v->Describe(&d); - md5_append(&h, (const md5_byte_t *) d.Bytes(), d.Len()); + md5_update(&h, (const u_char *) d.Bytes(), d.Len()); } } - md5_finish(&h, digest); + md5_final(&h, digest); } static void hmac_md5_val(val_list& vlist, unsigned char digest[16]) @@ -556,7 +556,53 @@ static void hmac_md5_val(val_list& vlist, unsigned char digest[16]) hash_md5_val(vlist, digest); for ( int i = 0; i < 16; ++i ) digest[i] = digest[i] ^ shared_hmac_md5_key[i]; - hash_md5(16, digest, digest); + MD5(digest, 16, digest); + } + +static void hash_sha1_val(val_list& vlist, unsigned char digest[20]) + { + SHA_CTX h; + + sha1_init(&h); + loop_over_list(vlist, i) + { + Val* v = vlist[i]; + if ( v->Type()->Tag() == TYPE_STRING ) + { + const BroString* str = v->AsString(); + sha1_update(&h, str->Bytes(), str->Len()); + } + else + { + ODesc d(DESC_BINARY); + v->Describe(&d); + sha1_update(&h, (const u_char *) d.Bytes(), d.Len()); + } + } + sha1_final(&h, digest); + } + +static void 
hash_sha256_val(val_list& vlist, unsigned char digest[32]) + { + SHA256_CTX h; + + sha256_init(&h); + loop_over_list(vlist, i) + { + Val* v = vlist[i]; + if ( v->Type()->Tag() == TYPE_STRING ) + { + const BroString* str = v->AsString(); + sha256_update(&h, str->Bytes(), str->Len()); + } + else + { + ODesc d(DESC_BINARY); + v->Describe(&d); + sha256_update(&h, (const u_char *) d.Bytes(), d.Len()); + } + } + sha256_final(&h, digest); } %%} @@ -565,6 +611,8 @@ static void hmac_md5_val(val_list& vlist, unsigned char digest[16]) ## Returns: The MD5 hash value of the concatenated arguments. ## ## .. bro:see:: md5_hmac md5_hash_init md5_hash_update md5_hash_finish +## sha1_hash sha1_hash_init sha1_hash_update sha1_hash_finish +## sha256_hash sha256_hash_init sha256_hash_update sha256_hash_finish ## ## .. note:: ## @@ -578,6 +626,46 @@ function md5_hash%(...%): string return new StringVal(md5_digest_print(digest)); %} +## Computes the SHA1 hash value of the provided list of arguments. +## +## Returns: The SHA1 hash value of the concatenated arguments. +## +## .. bro:see:: md5_hash md5_hmac md5_hash_init md5_hash_update md5_hash_finish +## sha1_hash_init sha1_hash_update sha1_hash_finish +## sha256_hash sha256_hash_init sha256_hash_update sha256_hash_finish +## +## .. note:: +## +## This function performs a one-shot computation of its arguments. +## For incremental hash computation, see :bro:id:`sha1_hash_init` and +## friends. +function sha1_hash%(...%): string + %{ + unsigned char digest[20]; + hash_sha1_val(@ARG@, digest); + return new StringVal(sha1_digest_print(digest)); + %} + +## Computes the SHA256 hash value of the provided list of arguments. +## +## Returns: The SHA256 hash value of the concatenated arguments. +## +## .. bro:see:: md5_hash md5_hmac md5_hash_init md5_hash_update md5_hash_finish +## sha1_hash sha1_hash_init sha1_hash_update sha1_hash_finish +## sha256_hash_init sha256_hash_update sha256_hash_finish +## +## .. note:: +## +## This function performs a one-shot computation of its arguments. +## For incremental hash computation, see :bro:id:`sha256_hash_init` and +## friends. +function sha256_hash%(...%): string + %{ + unsigned char digest[32]; + hash_sha256_val(@ARG@, digest); + return new StringVal(sha256_digest_print(digest)); + %} + ## Computes an HMAC-MD5 hash value of the provided list of arguments. The HMAC ## secret key is generated from available entropy when Bro starts up, or it can ## be specified for repeatability using the ``-K`` command line flag. @@ -585,6 +673,8 @@ function md5_hash%(...%): string ## Returns: The HMAC-MD5 hash value of the concatenated arguments. ## ## .. bro:see:: md5_hash md5_hash_init md5_hash_update md5_hash_finish +## sha1_hash sha1_hash_init sha1_hash_update sha1_hash_finish +## sha256_hash sha256_hash_init sha256_hash_update sha256_hash_finish function md5_hmac%(...%): string %{ unsigned char digest[16]; @@ -593,7 +683,9 @@ function md5_hmac%(...%): string %} %%{ -static map md5_states; +static map md5_states; +static map sha1_states; +static map sha256_states; BroString* convert_index_to_string(Val* index) { @@ -618,7 +710,9 @@ BroString* convert_index_to_string(Val* index) ## ## index: The unique identifier to associate with this hash computation. ## -## .. bro:see:: md5_hash md5_hmac md5_hash_update md5_hash_finish +## .. 
bro:see:: md5_hmac md5_hash md5_hash_update md5_hash_finish +## sha1_hash sha1_hash_init sha1_hash_update sha1_hash_finish +## sha256_hash sha256_hash_init sha256_hash_update sha256_hash_finish function md5_hash_init%(index: any%): bool %{ BroString* s = convert_index_to_string(index); @@ -626,7 +720,7 @@ function md5_hash_init%(index: any%): bool if ( md5_states.count(*s) < 1 ) { - md5_state_s h; + MD5_CTX h; md5_init(&h); md5_states[*s] = h; status = 1; @@ -636,6 +730,75 @@ function md5_hash_init%(index: any%): bool return new Val(status, TYPE_BOOL); %} +## Initializes SHA1 state to enable incremental hash computation. After +## initializing the SHA1 state with this function, you can feed data to +## :bro:id:`sha1_hash_update` and finally need to call +## :bro:id:`sha1_hash_finish` to finish the computation and get the final hash +## value. +## +## For example, when computing incremental SHA1 values of transferred files in +## multiple concurrent HTTP connections, one would call ``sha1_hash_init(c$id)`` +## once before invoking ``sha1_hash_update(c$id, some_more_data)`` in the +## :bro:id:`http_entity_data` event handler. When all data has arrived, a call +## to :bro:id:`sha1_hash_finish` returns the final hash value. +## +## index: The unique identifier to associate with this hash computation. +## +## .. bro:see:: md5_hmac md5_hash md5_hash_init md5_hash_update md5_hash_finish +## sha1_hash sha1_hash_update sha1_hash_finish +## sha256_hash sha256_hash_init sha256_hash_update sha256_hash_finish +function sha1_hash_init%(index: any%): bool + %{ + BroString* s = convert_index_to_string(index); + int status = 0; + + if ( sha1_states.count(*s) < 1 ) + { + SHA_CTX h; + sha1_init(&h); + sha1_states[*s] = h; + status = 1; + } + + delete s; + return new Val(status, TYPE_BOOL); + %} + +## Initializes SHA256 state to enable incremental hash computation. After +## initializing the SHA256 state with this function, you can feed data to +## :bro:id:`sha256_hash_update` and finally need to call +## :bro:id:`sha256_hash_finish` to finish the computation and get the final hash +## value. +## +## For example, when computing incremental SHA256 values of transferred files in +## multiple concurrent HTTP connections, one would call +## ``sha256_hash_init(c$id)`` once before invoking +## ``sha256_hash_update(c$id, some_more_data)`` in the +## :bro:id:`http_entity_data` event handler. When all data has arrived, a call +## to :bro:id:`sha256_hash_finish` returns the final hash value. +## +## index: The unique identifier to associate with this hash computation. +## +## .. bro:see:: md5_hmac md5_hash md5_hash_init md5_hash_update md5_hash_finish +## sha1_hash sha1_hash_init sha1_hash_update sha1_hash_finish +## sha256_hash sha256_hash_update sha256_hash_finish +function sha256_hash_init%(index: any%): bool + %{ + BroString* s = convert_index_to_string(index); + int status = 0; + + if ( sha256_states.count(*s) < 1 ) + { + SHA256_CTX h; + sha256_init(&h); + sha256_states[*s] = h; + status = 1; + } + + delete s; + return new Val(status, TYPE_BOOL); + %} + ## Update the MD5 value associated with a given index. It is required to ## call :bro:id:`md5_hash_init` once before calling this ## function. @@ -644,7 +807,9 @@ function md5_hash_init%(index: any%): bool ## ## data: The data to add to the hash computation. ## -## .. bro:see:: md5_hash md5_hmac md5_hash_init md5_hash_finish +## .. 
bro:see:: md5_hmac md5_hash md5_hash_init md5_hash_finish +## sha1_hash sha1_hash_init sha1_hash_update sha1_hash_finish +## sha256_hash sha256_hash_init sha256_hash_update sha256_hash_finish function md5_hash_update%(index: any, data: string%): bool %{ BroString* s = convert_index_to_string(index); @@ -652,7 +817,59 @@ function md5_hash_update%(index: any, data: string%): bool if ( md5_states.count(*s) > 0 ) { - md5_append(&md5_states[*s], data->Bytes(), data->Len()); + md5_update(&md5_states[*s], data->Bytes(), data->Len()); + status = 1; + } + + delete s; + return new Val(status, TYPE_BOOL); + %} + +## Update the SHA1 value associated with a given index. It is required to +## call :bro:id:`sha1_hash_init` once before calling this +## function. +## +## index: The unique identifier to associate with this hash computation. +## +## data: The data to add to the hash computation. +## +## .. bro:see:: md5_hmac md5_hash md5_hash_init md5_hash_update md5_hash_finish +## sha1_hash sha1_hash_init sha1_hash_finish +## sha256_hash sha256_hash_init sha256_hash_update sha256_hash_finish +function sha1_hash_update%(index: any, data: string%): bool + %{ + BroString* s = convert_index_to_string(index); + int status = 0; + + if ( sha1_states.count(*s) > 0 ) + { + sha1_update(&sha1_states[*s], data->Bytes(), data->Len()); + status = 1; + } + + delete s; + return new Val(status, TYPE_BOOL); + %} + +## Update the SHA256 value associated with a given index. It is required to +## call :bro:id:`sha256_hash_init` once before calling this +## function. +## +## index: The unique identifier to associate with this hash computation. +## +## data: The data to add to the hash computation. +## +## .. bro:see:: md5_hmac md5_hash md5_hash_init md5_hash_update md5_hash_finish +## sha1_hash sha1_hash_init sha1_hash_update sha1_hash_finish +## sha256_hash sha256_hash_init sha256_hash_finish +function sha256_hash_update%(index: any, data: string%): bool + %{ + BroString* s = convert_index_to_string(index); + int status = 0; + + if ( sha256_states.count(*s) > 0 ) + { + sha256_update(&sha256_states[*s], data->Bytes(), data->Len()); status = 1; } @@ -666,7 +883,9 @@ function md5_hash_update%(index: any, data: string%): bool ## ## Returns: The hash value associated with the computation at *index*. ## -## .. bro:see:: md5_hash md5_hmac md5_hash_init md5_hash_update +## .. bro:see:: md5_hmac md5_hash md5_hash_init md5_hash_update +## sha1_hash sha1_hash_init sha1_hash_update sha1_hash_finish +## sha256_hash sha256_hash_init sha256_hash_update sha256_hash_finish function md5_hash_finish%(index: any%): string %{ BroString* s = convert_index_to_string(index); @@ -675,7 +894,7 @@ function md5_hash_finish%(index: any%): string if ( md5_states.count(*s) > 0 ) { unsigned char digest[16]; - md5_finish(&md5_states[*s], digest); + md5_final(&md5_states[*s], digest); md5_states.erase(*s); printable_digest = new StringVal(md5_digest_print(digest)); } @@ -686,6 +905,62 @@ function md5_hash_finish%(index: any%): string return printable_digest; %} +## Returns the final SHA1 digest of an incremental hash computation. +## +## index: The unique identifier of this hash computation. +## +## Returns: The hash value associated with the computation at *index*. +## +## .. 
bro:see:: md5_hmac md5_hash md5_hash_init md5_hash_update md5_hash_finish +## sha1_hash sha1_hash_init sha1_hash_update +## sha256_hash sha256_hash_init sha256_hash_update sha256_hash_finish +function sha1_hash_finish%(index: any%): string + %{ + BroString* s = convert_index_to_string(index); + StringVal* printable_digest; + + if ( sha1_states.count(*s) > 0 ) + { + unsigned char digest[20]; + sha1_final(&sha1_states[*s], digest); + sha1_states.erase(*s); + printable_digest = new StringVal(sha1_digest_print(digest)); + } + else + printable_digest = new StringVal(""); + + delete s; + return printable_digest; + %} + +## Returns the final SHA256 digest of an incremental hash computation. +## +## index: The unique identifier of this hash computation. +## +## Returns: The hash value associated with the computation at *index*. +## +## .. bro:see:: md5_hmac md5_hash md5_hash_init md5_hash_update md5_hash_finish +## sha1_hash sha1_hash_init sha1_hash_update sha1_hash_finish +## sha256_hash sha256_hash_init sha256_hash_update +function sha256_hash_finish%(index: any%): string + %{ + BroString* s = convert_index_to_string(index); + StringVal* printable_digest; + + if ( sha256_states.count(*s) > 0 ) + { + unsigned char digest[32]; + sha256_final(&sha256_states[*s], digest); + sha256_states.erase(*s); + printable_digest = new StringVal(sha256_digest_print(digest)); + } + else + printable_digest = new StringVal(""); + + delete s; + return printable_digest; + %} + ## Generates a random number. ## ## max: The maximum value the random number. diff --git a/src/digest.h b/src/digest.h new file mode 100644 index 0000000000..ef52ba059a --- /dev/null +++ b/src/digest.h @@ -0,0 +1,92 @@ +// See the file "COPYING" in the main distribution directory for copyright. + +/** + * Wrapper and helper functions for MD5/SHA digest algorithms. + */ + +#ifndef bro_digest_h +#define bro_digest_h + +#include +#include + +#include "Reporter.h" + +static inline const char* digest_print(const u_char* digest, size_t n) + { + static char buf[256]; // big enough for any of md5/sha1/sha256 + for ( size_t i = 0; i < n; ++i ) + snprintf(buf + i * 2, 3, "%02x", digest[i]); + return buf; + } + +inline const char* md5_digest_print(const u_char digest[MD5_DIGEST_LENGTH]) + { + return digest_print(digest, MD5_DIGEST_LENGTH); + } + +inline const char* sha1_digest_print(const u_char digest[SHA_DIGEST_LENGTH]) + { + return digest_print(digest, SHA_DIGEST_LENGTH); + } + +inline const char* sha256_digest_print(const u_char digest[SHA256_DIGEST_LENGTH]) + { + return digest_print(digest, SHA256_DIGEST_LENGTH); + } + +inline void md5_init(MD5_CTX* c) + { + if ( ! MD5_Init(c) ) + reporter->InternalError("MD5_Init failed"); + } + +inline void md5_update(MD5_CTX* c, const void* data, unsigned long len) + { + if ( ! MD5_Update(c, data, len) ) + reporter->InternalError("MD5_Update failed"); + } + +inline void md5_final(MD5_CTX* c, u_char md[MD5_DIGEST_LENGTH]) + { + if ( ! MD5_Final(md, c) ) + reporter->InternalError("MD5_Final failed"); + } + +inline void sha1_init(SHA_CTX* c) + { + if ( ! SHA1_Init(c) ) + reporter->InternalError("SHA_Init failed"); + } + +inline void sha1_update(SHA_CTX* c, const void* data, unsigned long len) + { + if ( ! SHA1_Update(c, data, len) ) + reporter->InternalError("SHA_Update failed"); + } + +inline void sha1_final(SHA_CTX* c, u_char md[SHA_DIGEST_LENGTH]) + { + if ( ! SHA1_Final(md, c) ) + reporter->InternalError("SHA_Final failed"); + } + +inline void sha256_init(SHA256_CTX* c) + { + if ( ! 
SHA256_Init(c) ) + reporter->InternalError("SHA256_Init failed"); + } + +inline void sha256_update(SHA256_CTX* c, const void* data, unsigned long len) + { + if ( ! SHA256_Update(c, data, len) ) + reporter->InternalError("SHA256_Update failed"); + } + +inline void sha256_final(SHA256_CTX* c, u_char md[SHA256_DIGEST_LENGTH]) + { + if ( ! SHA256_Final(md, c) ) + reporter->InternalError("SHA256_Final failed"); + } + +#endif //bro_digest_h diff --git a/src/event.bif b/src/event.bif index 4f586c4f79..94ee923240 100644 --- a/src/event.bif +++ b/src/event.bif @@ -773,10 +773,9 @@ event udp_contents%(u: connection, is_orig: bool, contents: string%); ## .. bro:see:: udp_contents udp_reply udp_request event udp_session_done%(u: connection%); -## Generated for all ICMP messages that are not handled separetely with dedicated -## ICMP events. Bro's ICMP analyzer handles a number of ICMP messages directly -## with dedicated events. This handlers acts as a fallback for those it doesn't. -## The *icmp* record provides more information about the message. +## Generated for all ICMP messages that are not handled separately with dedicated +## ICMP events. Bro's ICMP analyzer handles a number of ICMP messages directly +## with dedicated events. This event acts as a fallback for those it doesn't. ## ## See `Wikipedia ## `__ for more @@ -787,8 +786,7 @@ event udp_session_done%(u: connection%); ## icmp: Additional ICMP-specific information augmenting the standard ## connection record *c*. ## -## .. bro:see:: icmp_echo_reply icmp_echo_request icmp_redirect -## icmp_time_exceeded icmp_unreachable +## .. bro:see:: icmp_error_message event icmp_sent%(c: connection, icmp: icmp_conn%); ## Generated for ICMP *echo request* messages. @@ -809,8 +807,7 @@ event icmp_sent%(c: connection, icmp: icmp_conn%); ## payload: The message-specific data of the packet payload, i.e., everything after ## the first 8 bytes of the ICMP header. ## -## .. bro:see:: icmp_echo_reply icmp_redirect icmp_sent -## icmp_time_exceeded icmp_unreachable +## .. bro:see:: icmp_echo_reply event icmp_echo_request%(c: connection, icmp: icmp_conn, id: count, seq: count, payload: string%); ## Generated for ICMP *echo reply* messages. @@ -831,26 +828,30 @@ event icmp_echo_request%(c: connection, icmp: icmp_conn, id: count, seq: count, ## payload: The message-specific data of the packet payload, i.e., everything after ## the first 8 bytes of the ICMP header. ## -## .. bro:see:: icmp_echo_request icmp_redirect icmp_sent -## icmp_time_exceeded icmp_unreachable +## .. bro:see:: icmp_echo_request event icmp_echo_reply%(c: connection, icmp: icmp_conn, id: count, seq: count, payload: string%); -## Generated for all ICMP error messages that are not handled separately with dedicated -## ICMP events. Bro's ICMP analyzer handles a number of ICMP messages directly -## with dedicated events. This handler acts as a fallback for those it doesn't. -## The *icmp* record provides more information about the message. +## Generated for all ICMPv6 error messages that are not handled +## separately with dedicated events. Bro's ICMP analyzer handles a number +## of ICMP error messages directly with dedicated events. This event acts +## as a fallback for those it doesn't. ## ## See `Wikipedia -## `__ for more -## information about the ICMP protocol. +## `__ for more +## information about the ICMPv6 protocol. ## ## c: The connection record for the corresponding ICMP flow. ## ## icmp: Additional ICMP-specific information augmenting the standard ## connection record *c*. ## -## .. 
bro:see:: icmp_echo_reply icmp_echo_request icmp_redirect -## icmp_time_exceeded icmp_unreachable +## code: The ICMP code of the error message. +## +## context: A record with specifics of the original packet that the message refers +## to. +## +## .. bro:see:: icmp_unreachable icmp_packet_too_big +## icmp_time_exceeded icmp_parameter_problem event icmp_error_message%(c: connection, icmp: icmp_conn, code: count, context: icmp_context%); ## Generated for ICMP *destination unreachable* messages. @@ -872,15 +873,15 @@ event icmp_error_message%(c: connection, icmp: icmp_conn, code: count, context: ## that if the *unreachable* includes only a partial IP header for some reason, no ## fields of *context* will be filled out. ## -## .. bro:see:: icmp_echo_reply icmp_echo_request icmp_redirect icmp_sent -## icmp_time_exceeded +## .. bro:see:: icmp_error_message icmp_packet_too_big +## icmp_time_exceeded icmp_parameter_problem event icmp_unreachable%(c: connection, icmp: icmp_conn, code: count, context: icmp_context%); -## Generated for ICMP *packet too big* messages. +## Generated for ICMPv6 *packet too big* messages. ## ## See `Wikipedia -## `__ for more -## information about the ICMP protocol. +## `__ for more +## information about the ICMPv6 protocol. ## ## c: The connection record for the corresponding ICMP flow. ## @@ -895,8 +896,8 @@ event icmp_unreachable%(c: connection, icmp: icmp_conn, code: count, context: ic ## that if the *too big* includes only a partial IP header for some reason, no ## fields of *context* will be filled out. ## -## .. bro:see:: icmp_echo_reply icmp_echo_request icmp_redirect icmp_sent -## icmp_time_exceeded +## .. bro:see:: icmp_error_message icmp_unreachable +## icmp_time_exceeded icmp_parameter_problem event icmp_packet_too_big%(c: connection, icmp: icmp_conn, code: count, context: icmp_context%); ## Generated for ICMP *time exceeded* messages. @@ -918,15 +919,15 @@ event icmp_packet_too_big%(c: connection, icmp: icmp_conn, code: count, context: ## if the *exceeded* includes only a partial IP header for some reason, no fields ## of *context* will be filled out. ## -## .. bro:see:: icmp_echo_reply icmp_echo_request icmp_redirect icmp_sent -## icmp_unreachable +## .. bro:see:: icmp_error_message icmp_unreachable icmp_packet_too_big +## icmp_parameter_problem event icmp_time_exceeded%(c: connection, icmp: icmp_conn, code: count, context: icmp_context%); -## Generated for ICMP *parameter problem* messages. +## Generated for ICMPv6 *parameter problem* messages. ## ## See `Wikipedia -## `__ for more -## information about the ICMP protocol. +## `__ for more +## information about the ICMPv6 protocol. ## ## c: The connection record for the corresponding ICMP flow. ## @@ -941,8 +942,8 @@ event icmp_time_exceeded%(c: connection, icmp: icmp_conn, code: count, context: ## if the *parameter problem* includes only a partial IP header for some reason, no fields ## of *context* will be filled out. ## -## .. bro:see:: icmp_echo_reply icmp_echo_request icmp_redirect icmp_sent -## icmp_unreachable +## .. bro:see:: icmp_error_message icmp_unreachable icmp_packet_too_big +## icmp_time_exceeded event icmp_parameter_problem%(c: connection, icmp: icmp_conn, code: count, context: icmp_context%); ## Generated for ICMP *router solicitation* messages. @@ -956,8 +957,8 @@ event icmp_parameter_problem%(c: connection, icmp: icmp_conn, code: count, conte ## icmp: Additional ICMP-specific information augmenting the standard connection ## record *c*. ## -## .. 
bro:see:: icmp_echo_reply icmp_echo_request icmp_sent -## icmp_time_exceeded icmp_unreachable +## .. bro:see:: icmp_router_advertisement +## icmp_neighbor_solicitation icmp_neighbor_advertisement icmp_redirect event icmp_router_solicitation%(c: connection, icmp: icmp_conn%); ## Generated for ICMP *router advertisement* messages. @@ -971,9 +972,30 @@ event icmp_router_solicitation%(c: connection, icmp: icmp_conn%); ## icmp: Additional ICMP-specific information augmenting the standard connection ## record *c*. ## -## .. bro:see:: icmp_echo_reply icmp_echo_request icmp_sent -## icmp_time_exceeded icmp_unreachable -event icmp_router_advertisement%(c: connection, icmp: icmp_conn, hop_limit: count, managed: bool, router_lifetime: count, reachable_time: interval, retrans_timer: interval%); +## cur_hop_limit: The default value that should be placed in Hop Count field +## for outgoing IP packets. +## +## managed: Managed address configuration flag, :rfc:`4861`. +## +## other: Other stateful configuration flag, :rfc:`4861`. +## +## home_agent: Mobile IPv6 home agent flag, :rfc:`3775`. +## +## pref: Router selection preferences, :rfc:`4191`. +## +## proxy: Neighbor discovery proxy flag, :rfc:`4389`. +## +## rsv: Remaining two reserved bits of router advertisement flags. +## +## router_lifetime: How long this router should be used as a default router. +## +## reachable_time: How long a neighbor should be considered reachable. +## +## retrans_timer: How long a host should wait before retransmitting. +## +## .. bro:see:: icmp_router_solicitation +## icmp_neighbor_solicitation icmp_neighbor_advertisement icmp_redirect +event icmp_router_advertisement%(c: connection, icmp: icmp_conn, cur_hop_limit: count, managed: bool, other: bool, home_agent: bool, pref: count, proxy: bool, rsv: count, router_lifetime: interval, reachable_time: interval, retrans_timer: interval%); ## Generated for ICMP *neighbor solicitation* messages. ## @@ -986,8 +1008,10 @@ event icmp_router_advertisement%(c: connection, icmp: icmp_conn, hop_limit: coun ## icmp: Additional ICMP-specific information augmenting the standard connection ## record *c*. ## -## .. bro:see:: icmp_echo_reply icmp_echo_request icmp_sent -## icmp_time_exceeded icmp_unreachable +## tgt: The IP address of the target of the solicitation. +## +## .. bro:see:: icmp_router_solicitation icmp_router_advertisement +## icmp_neighbor_advertisement icmp_redirect event icmp_neighbor_solicitation%(c: connection, icmp: icmp_conn, tgt:addr%); ## Generated for ICMP *neighbor advertisement* messages. @@ -1001,9 +1025,18 @@ event icmp_neighbor_solicitation%(c: connection, icmp: icmp_conn, tgt:addr%); ## icmp: Additional ICMP-specific information augmenting the standard connection ## record *c*. ## -## .. bro:see:: icmp_echo_reply icmp_echo_request icmp_sent -## icmp_time_exceeded icmp_unreachable -event icmp_neighbor_advertisement%(c: connection, icmp: icmp_conn, tgt:addr%); +## router: Flag indicating the sender is a router. +## +## solicited: Flag indicating advertisement is in response to a solicitation. +## +## override: Flag indicating advertisement should override existing caches. +## +## tgt: the Target Address in the soliciting message or the address whose +## link-layer address has changed for unsolicited adverts. +## +## .. 
bro:see:: icmp_router_solicitation icmp_router_advertisement +## icmp_neighbor_solicitation icmp_redirect +event icmp_neighbor_advertisement%(c: connection, icmp: icmp_conn, router: bool, solicited: bool, override: bool, tgt:addr%); ## Generated for ICMP *redirect* messages. ## @@ -1016,10 +1049,13 @@ event icmp_neighbor_advertisement%(c: connection, icmp: icmp_conn, tgt:addr%); ## icmp: Additional ICMP-specific information augmenting the standard connection ## record *c*. ## -## a: The new destination address the message is redirecting to. +## tgt: The address that is supposed to be a better first hop to use for +## ICMP Destination Address. ## -## .. bro:see:: icmp_echo_reply icmp_echo_request icmp_sent -## icmp_time_exceeded icmp_unreachable +## dest: The address of the destination which is redirected to the target. +## +## .. bro:see:: icmp_router_solicitation icmp_router_advertisement +## icmp_neighbor_solicitation icmp_neighbor_advertisement event icmp_redirect%(c: connection, icmp: icmp_conn, tgt: addr, dest: addr%); ## Generated when a TCP connection terminated, passing on statistics about the diff --git a/src/logging.bif b/src/logging.bif index c8960b4e38..efc6ed0b4b 100644 --- a/src/logging.bif +++ b/src/logging.bif @@ -72,3 +72,12 @@ const set_separator: string; const empty_field: string; const unset_field: string; +# Options for the DataSeries writer. + +module LogDataSeries; + +const compression: string; +const extent_size: count; +const dump_schema: bool; +const use_integer_for_time: bool; +const num_threads: count; diff --git a/src/logging/Manager.cc b/src/logging/Manager.cc index 9437f0099f..34d10a1abf 100644 --- a/src/logging/Manager.cc +++ b/src/logging/Manager.cc @@ -7,6 +7,7 @@ #include "../NetVar.h" #include "../Net.h" +#include "threading/Manager.h" #include "threading/SerialTypes.h" #include "Manager.h" @@ -16,9 +17,11 @@ #include "writers/Ascii.h" #include "writers/None.h" +#ifdef USE_DATASERIES +#include "writers/DataSeries.h" +#endif + using namespace logging; -using threading::Value; -using threading::Field; // Structure describing a log writer type. struct WriterDefinition { @@ -32,6 +35,9 @@ struct WriterDefinition { WriterDefinition log_writers[] = { { BifEnum::Log::WRITER_NONE, "None", 0, writer::None::Instantiate }, { BifEnum::Log::WRITER_ASCII, "Ascii", 0, writer::Ascii::Instantiate }, +#ifdef USE_DATASERIES + { BifEnum::Log::WRITER_DATASERIES, "DataSeries", 0, writer::DataSeries::Instantiate }, +#endif // End marker, don't touch. { BifEnum::Log::WRITER_DEFAULT, "None", 0, (WriterBackend* (*)(WriterFrontend* frontend))0 } @@ -51,7 +57,7 @@ struct Manager::Filter { Func* postprocessor; int num_fields; - Field** fields; + threading::Field** fields; // Vector indexed by field number. 
Each element is a list of record // indices defining a path leading to the value across potential @@ -119,6 +125,7 @@ Manager::Stream::~Stream() Manager::Manager() { + rotations_pending = 0; } Manager::~Manager() @@ -127,6 +134,16 @@ Manager::~Manager() delete *s; } +list Manager::SupportedFormats() + { + list formats; + + for ( WriterDefinition* ld = log_writers; ld->type != BifEnum::Log::WRITER_DEFAULT; ++ld ) + formats.push_back(ld->name); + + return formats; + } + WriterBackend* Manager::CreateBackend(WriterFrontend* frontend, bro_int_t type) { WriterDefinition* ld = log_writers; @@ -135,7 +152,7 @@ WriterBackend* Manager::CreateBackend(WriterFrontend* frontend, bro_int_t type) { if ( ld->type == BifEnum::Log::WRITER_DEFAULT ) { - reporter->Error("unknow writer when creating writer"); + reporter->Error("unknown writer type requested"); return 0; } @@ -159,10 +176,8 @@ WriterBackend* Manager::CreateBackend(WriterFrontend* frontend, bro_int_t type) // function. ld->factory = 0; - DBG_LOG(DBG_LOGGING, "failed to init writer class %s", - ld->name); - - return false; + reporter->Error("initialization of writer %s failed", ld->name); + return 0; } } @@ -449,9 +464,9 @@ bool Manager::TraverseRecord(Stream* stream, Filter* filter, RecordType* rt, filter->indices.push_back(new_indices); - filter->fields = (Field**) + filter->fields = (threading::Field**) realloc(filter->fields, - sizeof(Field) * ++filter->num_fields); + sizeof(threading::Field) * ++filter->num_fields); if ( ! filter->fields ) { @@ -459,7 +474,7 @@ bool Manager::TraverseRecord(Stream* stream, Filter* filter, RecordType* rt, return false; } - Field* field = new Field(); + threading::Field* field = new threading::Field(); field->name = new_path; field->type = t->Tag(); @@ -571,7 +586,7 @@ bool Manager::AddFilter(EnumVal* id, RecordVal* fval) for ( int i = 0; i < filter->num_fields; i++ ) { - Field* field = filter->fields[i]; + threading::Field* field = filter->fields[i]; DBG_LOG(DBG_LOGGING, " field %10s: %s", field->name.c_str(), type_name(field->type)); } @@ -743,10 +758,10 @@ bool Manager::Write(EnumVal* id, RecordVal* columns) // Copy the fields for WriterFrontend::Init() as it // will take ownership. - Field** arg_fields = new Field*[filter->num_fields]; + threading::Field** arg_fields = new threading::Field*[filter->num_fields]; for ( int j = 0; j < filter->num_fields; ++j ) - arg_fields[j] = new Field(*filter->fields[j]); + arg_fields[j] = new threading::Field(*filter->fields[j]); writer = CreateWriter(stream->id, filter->writer, path, filter->num_fields, @@ -897,10 +912,10 @@ threading::Value* Manager::ValToLogVal(Val* val, BroType* ty) return lval; } -Value** Manager::RecordToFilterVals(Stream* stream, Filter* filter, +threading::Value** Manager::RecordToFilterVals(Stream* stream, Filter* filter, RecordVal* columns) { - Value** vals = new Value*[filter->num_fields]; + threading::Value** vals = new threading::Value*[filter->num_fields]; for ( int i = 0; i < filter->num_fields; ++i ) { @@ -919,7 +934,7 @@ Value** Manager::RecordToFilterVals(Stream* stream, Filter* filter, if ( ! val ) { // Value, or any of its parents, is not set. 
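The new Manager::SupportedFormats() above simply walks the writer table; a hypothetical caller, assuming the returned list's element type is std::string (the caller, its variable names, and the fprintf are illustrative only):

    std::list<std::string> fmts = logging::Manager::SupportedFormats();

    for ( std::list<std::string>::const_iterator i = fmts.begin();
          i != fmts.end(); ++i )
        fprintf(stderr, "supported log format: %s\n", i->c_str());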
- vals[i] = new Value(filter->fields[i]->type, false); + vals[i] = new threading::Value(filter->fields[i]->type, false); break; } } @@ -932,7 +947,7 @@ Value** Manager::RecordToFilterVals(Stream* stream, Filter* filter, } WriterFrontend* Manager::CreateWriter(EnumVal* id, EnumVal* writer, string path, - int num_fields, const Field* const* fields, bool local, bool remote) + int num_fields, const threading::Field* const* fields, bool local, bool remote) { Stream* stream = FindStream(id); @@ -996,7 +1011,7 @@ WriterFrontend* Manager::CreateWriter(EnumVal* id, EnumVal* writer, string path, return writer_obj; } -void Manager::DeleteVals(int num_fields, Value** vals) +void Manager::DeleteVals(int num_fields, threading::Value** vals) { // Note this code is duplicated in WriterBackend::DeleteVals(). for ( int i = 0; i < num_fields; i++ ) @@ -1006,7 +1021,7 @@ void Manager::DeleteVals(int num_fields, Value** vals) } bool Manager::Write(EnumVal* id, EnumVal* writer, string path, int num_fields, - Value** vals) + threading::Value** vals) { Stream* stream = FindStream(id); @@ -1113,10 +1128,19 @@ bool Manager::Flush(EnumVal* id) void Manager::Terminate() { + // Make sure we process all the pending rotations. + while ( rotations_pending ) + { + thread_mgr->ForceProcessing(); // A blatant layering violation ... + usleep(1000); + } + for ( vector::iterator s = streams.begin(); s != streams.end(); ++s ) { - if ( *s ) - Flush((*s)->id); + if ( ! *s ) + continue; + + Flush((*s)->id); } } @@ -1219,11 +1243,19 @@ void Manager::Rotate(WriterInfo* winfo) // Trigger the rotation. winfo->writer->Rotate(tmp, winfo->open_time, network_time, terminating); + + ++rotations_pending; } bool Manager::FinishedRotation(WriterFrontend* writer, string new_name, string old_name, double open, double close, bool terminating) { + --rotations_pending; + + if ( ! writer ) + // Writer didn't produce local output. + return true; + DBG_LOG(DBG_LOGGING, "Finished rotating %s at %.6f, new name %s", writer->Path().c_str(), network_time, new_name.c_str()); diff --git a/src/logging/Manager.h b/src/logging/Manager.h index bf097c5e1a..f5e62b0683 100644 --- a/src/logging/Manager.h +++ b/src/logging/Manager.h @@ -15,7 +15,6 @@ class RotationTimer; namespace logging { - class WriterBackend; class WriterFrontend; class RotationFinishedMessage; @@ -56,7 +55,7 @@ public: * logging.bif, which just forwards here. */ bool EnableStream(EnumVal* id); - + /** * Disables a log stream. * @@ -145,6 +144,11 @@ public: */ void Terminate(); + /** + * Returns a list of supported output formats. + */ + static list SupportedFormats(); + protected: friend class WriterFrontend; friend class RotationFinishedMessage; @@ -196,6 +200,7 @@ private: WriterInfo* FindWriter(WriterFrontend* writer); vector streams; // Indexed by stream enum. + int rotations_pending; // Number of rotations not yet finished. }; } diff --git a/src/logging/WriterBackend.cc b/src/logging/WriterBackend.cc index 7c71c09604..23a95279d7 100644 --- a/src/logging/WriterBackend.cc +++ b/src/logging/WriterBackend.cc @@ -1,6 +1,7 @@ // See the file "COPYING" in the main distribution directory for copyright. #include "util.h" +#include "bro_inet_ntop.h" #include "threading/SerialTypes.h" #include "WriterBackend.h" @@ -222,17 +223,6 @@ bool WriterBackend::Flush() return true; } -bool WriterBackend::Finish() - { - if ( ! 
DoFlush() ) - { - DisableFrontend(); - return false; - } - - return true; - } - bool WriterBackend::DoHeartbeat(double network_time, double current_time) { MsgThread::DoHeartbeat(network_time, current_time); @@ -248,7 +238,7 @@ string WriterBackend::Render(const threading::Value::addr_t& addr) const { char s[INET_ADDRSTRLEN]; - if ( inet_ntop(AF_INET, &addr.in.in4, s, INET_ADDRSTRLEN) == NULL ) + if ( ! bro_inet_ntop(AF_INET, &addr.in.in4, s, INET_ADDRSTRLEN) ) return ""; else return s; @@ -257,7 +247,7 @@ string WriterBackend::Render(const threading::Value::addr_t& addr) const { char s[INET6_ADDRSTRLEN]; - if ( inet_ntop(AF_INET6, &addr.in.in6, s, INET6_ADDRSTRLEN) == NULL ) + if ( ! bro_inet_ntop(AF_INET6, &addr.in.in6, s, INET6_ADDRSTRLEN) ) return ""; else return s; @@ -278,4 +268,9 @@ string WriterBackend::Render(const threading::Value::subnet_t& subnet) const return s; } - +string WriterBackend::Render(double d) const + { + char buf[256]; + modp_dtoa(d, buf, 6); + return buf; + } diff --git a/src/logging/WriterBackend.h b/src/logging/WriterBackend.h index eea0927fd2..1269976aee 100644 --- a/src/logging/WriterBackend.h +++ b/src/logging/WriterBackend.h @@ -101,15 +101,6 @@ public: */ bool Rotate(string rotated_path, double open, double close, bool terminating); - /** - * Finishes writing to this logger in a regularl fashion. Must not be - * called if an error has been indicated earlier. After calling this, - * no further writing must be performed. - * - * @return False if an error occured. - */ - bool Finish(); - /** * Disables the frontend that has instantiated this backend. Once * disabled,the frontend will not send any further message over. @@ -174,7 +165,17 @@ public: */ string Render(const threading::Value::subnet_t& subnet) const; + /** Helper method to render a double in Bro's standard precision. + * + * @param d The double. + * + * @return An ASCII representation of the double. + */ + string Render(double d) const; + protected: + friend class FinishMessage; + /** * Writer-specific intialization method. * @@ -272,26 +273,18 @@ protected: bool terminating) = 0; /** - * Writer-specific method implementing log output finalization at - * termination. Not called when any of the other methods has - * previously signaled an error, i.e., executing this method signals - * a regular shutdown of the writer. + * Writer-specific method called just before the threading system is + * going to shutdown. * - * A writer implementation must override this method but it can just - * ignore calls if flushing doesn't align with its semantics. - * - * If the method returns false, it will be assumed that a fatal error - * has occured that prevents the writer from further operation; it - * will then be disabled and eventually deleted. When returning - * false, an implementation should also call Error() to indicate what - * happened. + * This method can be overridden but one must call + * WriterBackend::DoFinish(). */ - virtual bool DoFinish() = 0; + virtual bool DoFinish() { return MsgThread::DoFinish(); } /** * Triggered by regular heartbeat messages from the main thread. * - * This method can be overridden but once must call + * This method can be overridden but one must call * WriterBackend::DoHeartbeat(). 
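Both hooks now require chaining to the base class, as the Ascii and DataSeries writers below do; a sketch of a hypothetical writer following that contract (MyWriter and CloseOutputFile are illustrative names, not part of this change):

    bool MyWriter::DoFinish()
        {
        CloseOutputFile();                  // writer-specific cleanup (hypothetical)
        return WriterBackend::DoFinish();   // required: chain to the base class
        }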
*/ virtual bool DoHeartbeat(double network_time, double current_time); diff --git a/src/logging/WriterFrontend.cc b/src/logging/WriterFrontend.cc index c100e99f90..33c9c04c63 100644 --- a/src/logging/WriterFrontend.cc +++ b/src/logging/WriterFrontend.cc @@ -90,7 +90,7 @@ public: FinishMessage(WriterBackend* backend) : threading::InputMessage("Finish", backend) {} - virtual bool Process() { return Object()->Finish(); } + virtual bool Process() { return Object()->DoFinish(); } }; } @@ -117,8 +117,9 @@ WriterFrontend::WriterFrontend(EnumVal* arg_stream, EnumVal* arg_writer, bool ar if ( local ) { backend = log_mgr->CreateBackend(this, writer->AsEnum()); - assert(backend); - backend->Start(); + + if ( backend ) + backend->Start(); } else @@ -256,6 +257,10 @@ void WriterFrontend::Rotate(string rotated_path, double open, double close, bool if ( backend ) backend->SendIn(new RotateMessage(backend, this, rotated_path, open, close, terminating)); + else + // Still signal log manager that we're done, but signal that + // nothing happened by setting the writer to zeri. + log_mgr->FinishedRotation(0, "", rotated_path, open, close, terminating); } void WriterFrontend::Finish() diff --git a/src/logging/writers/Ascii.cc b/src/logging/writers/Ascii.cc index 0759e60a82..1e7a55c34c 100644 --- a/src/logging/writers/Ascii.cc +++ b/src/logging/writers/Ascii.cc @@ -69,8 +69,7 @@ bool Ascii::WriteHeaderField(const string& key, const string& val) return (fwrite(str.c_str(), str.length(), 1, file) == 1); } -bool Ascii::DoInit(string path, int num_fields, - const Field* const * fields) +bool Ascii::DoInit(string path, int num_fields, const Field* const * fields) { if ( output_to_stdout ) path = "/dev/stdout"; @@ -87,6 +86,9 @@ bool Ascii::DoInit(string path, int num_fields, if ( include_header ) { + string names; + string types; + string str = string(header_prefix, header_prefix_len) + "separator " // Always use space as separator here. + get_escaped_string(string(separator, separator_len), false) @@ -104,9 +106,6 @@ bool Ascii::DoInit(string path, int num_fields, WriteHeaderField("path", get_escaped_string(path, false))) ) goto write_error; - string names; - string types; - for ( int i = 0; i < num_fields; ++i ) { if ( i > 0 ) @@ -115,15 +114,8 @@ bool Ascii::DoInit(string path, int num_fields, types += string(separator, separator_len); } - const Field* field = fields[i]; - names += field->name; - types += type_name(field->type); - if ( (field->type == TYPE_TABLE) || (field->type == TYPE_VECTOR) ) - { - types += "["; - types += type_name(field->subtype); - types += "]"; - } + names += fields[i]->name; + types += fields[i]->TypeName(); } if ( ! (WriteHeaderField("fields", names) @@ -146,7 +138,7 @@ bool Ascii::DoFlush() bool Ascii::DoFinish() { - return true; + return WriterBackend::DoFinish(); } bool Ascii::DoWriteOne(ODesc* desc, Value* val, const Field* field) @@ -184,15 +176,19 @@ bool Ascii::DoWriteOne(ODesc* desc, Value* val, const Field* field) desc->Add(Render(val->val.addr_val)); break; - case TYPE_TIME: - case TYPE_INTERVAL: - char buf[256]; - modp_dtoa(val->val.double_val, buf, 6); - desc->Add(buf); + case TYPE_DOUBLE: + // Rendering via Add() truncates trailing 0s after the + // decimal point. The difference with TIME/INTERVAL is mainly + // to keep the log format consistent. + desc->Add(val->val.double_val); break; - case TYPE_DOUBLE: - desc->Add(val->val.double_val); + case TYPE_INTERVAL: + case TYPE_TIME: + // Rendering via Render() keeps trailing 0s after the decimal + // point. 
The difference with DOUBLEis mainly to keep the log + // format consistent. + desc->Add(Render(val->val.double_val)); break; case TYPE_ENUM: diff --git a/src/logging/writers/DataSeries.cc b/src/logging/writers/DataSeries.cc new file mode 100644 index 0000000000..1d5a6ea4da --- /dev/null +++ b/src/logging/writers/DataSeries.cc @@ -0,0 +1,423 @@ +// See the file "COPYING" in the main distribution directory for copyright. + +#include "config.h" + +#ifdef USE_DATASERIES + +#include +#include +#include + +#include + +#include "NetVar.h" +#include "threading/SerialTypes.h" + +#include "DataSeries.h" + +using namespace logging; +using namespace writer; + +std::string DataSeries::LogValueToString(threading::Value *val) + { + // In some cases, no value is attached. If this is the case, return + // an empty string. + if( ! val->present ) + return ""; + + switch(val->type) { + case TYPE_BOOL: + return (val->val.int_val ? "true" : "false"); + + case TYPE_INT: + { + std::ostringstream ostr; + ostr << val->val.int_val; + return ostr.str(); + } + + case TYPE_COUNT: + case TYPE_COUNTER: + case TYPE_PORT: + { + std::ostringstream ostr; + ostr << val->val.uint_val; + return ostr.str(); + } + + case TYPE_SUBNET: + return Render(val->val.subnet_val); + + case TYPE_ADDR: + return Render(val->val.addr_val); + + // Note: These two cases are relatively special. We need to convert + // these values into their integer equivalents to maximize precision. + // At the moment, there won't be a noticeable effect (Bro uses the + // double format everywhere internally, so we've already lost the + // precision we'd gain here), but timestamps may eventually switch to + // this representation within Bro. + // + // In the near-term, this *should* lead to better pack_relative (and + // thus smaller output files). + case TYPE_TIME: + case TYPE_INTERVAL: + if ( ds_use_integer_for_time ) + { + std::ostringstream ostr; + ostr << (uint64_t)(DataSeries::TIME_SCALE * val->val.double_val); + return ostr.str(); + } + else + return Render(val->val.double_val); + + case TYPE_DOUBLE: + return Render(val->val.double_val); + + case TYPE_ENUM: + case TYPE_STRING: + case TYPE_FILE: + case TYPE_FUNC: + if ( ! val->val.string_val->size() ) + return ""; + + return string(val->val.string_val->data(), val->val.string_val->size()); + + case TYPE_TABLE: + { + if ( ! val->val.set_val.size ) + return ""; + + string tmpString = ""; + + for ( int j = 0; j < val->val.set_val.size; j++ ) + { + if ( j > 0 ) + tmpString += ds_set_separator; + + tmpString += LogValueToString(val->val.set_val.vals[j]); + } + + return tmpString; + } + + case TYPE_VECTOR: + { + if ( ! val->val.vector_val.size ) + return ""; + + string tmpString = ""; + + for ( int j = 0; j < val->val.vector_val.size; j++ ) + { + if ( j > 0 ) + tmpString += ds_set_separator; + + tmpString += LogValueToString(val->val.vector_val.vals[j]); + } + + return tmpString; + } + + default: + InternalError(Fmt("unknown type %s in DataSeries::LogValueToString", type_name(val->type))); + return "cannot be reached"; + } +} + +string DataSeries::GetDSFieldType(const threading::Field *field) +{ + switch(field->type) { + case TYPE_BOOL: + return "bool"; + + case TYPE_COUNT: + case TYPE_COUNTER: + case TYPE_PORT: + case TYPE_INT: + return "int64"; + + case TYPE_DOUBLE: + return "double"; + + case TYPE_TIME: + case TYPE_INTERVAL: + return ds_use_integer_for_time ? 
"int64" : "double"; + + case TYPE_SUBNET: + case TYPE_ADDR: + case TYPE_ENUM: + case TYPE_STRING: + case TYPE_FILE: + case TYPE_TABLE: + case TYPE_VECTOR: + case TYPE_FUNC: + return "variable32"; + + default: + InternalError(Fmt("unknown type %s in DataSeries::GetDSFieldType", type_name(field->type))); + return "cannot be reached"; + } +} + +string DataSeries::BuildDSSchemaFromFieldTypes(const vector& vals, string sTitle) + { + if( ! sTitle.size() ) + sTitle = "GenericBroStream"; + + string xmlschema = "\n"; + + for( size_t i = 0; i < vals.size(); ++i ) + { + xmlschema += "\t\n"; + } + + xmlschema += "\n"; + + for( size_t i = 0; i < vals.size(); ++i ) + { + xmlschema += "\n"; + } + + return xmlschema; +} + +std::string DataSeries::GetDSOptionsForType(const threading::Field *field) +{ + switch( field->type ) { + case TYPE_TIME: + case TYPE_INTERVAL: + { + std::string s; + s += "pack_relative=\"" + std::string(field->name) + "\""; + + if ( ! ds_use_integer_for_time ) + s += " pack_scale=\"1e-6\" print_format=\"%.6f\" pack_scale_warn=\"no\""; + else + s += string(" units=\"") + TIME_UNIT() + "\" epoch=\"unix\""; + + return s; + } + + case TYPE_SUBNET: + case TYPE_ADDR: + case TYPE_ENUM: + case TYPE_STRING: + case TYPE_FILE: + case TYPE_TABLE: + case TYPE_VECTOR: + return "pack_unique=\"yes\""; + + default: + return ""; + } +} + +DataSeries::DataSeries(WriterFrontend* frontend) : WriterBackend(frontend) +{ + ds_compression = string((const char *)BifConst::LogDataSeries::compression->Bytes(), + BifConst::LogDataSeries::compression->Len()); + ds_dump_schema = BifConst::LogDataSeries::dump_schema; + ds_extent_size = BifConst::LogDataSeries::extent_size; + ds_num_threads = BifConst::LogDataSeries::num_threads; + ds_use_integer_for_time = BifConst::LogDataSeries::use_integer_for_time; + ds_set_separator = ","; +} + +DataSeries::~DataSeries() +{ +} + +bool DataSeries::OpenLog(string path) + { + log_file = new DataSeriesSink(path + ".ds", compress_type); + log_file->writeExtentLibrary(log_types); + + for( size_t i = 0; i < schema_list.size(); ++i ) + extents.insert(std::make_pair(schema_list[i].field_name, + GeneralField::create(log_series, schema_list[i].field_name))); + + if ( ds_extent_size < ROW_MIN ) + { + Warning(Fmt("%d is not a valid value for 'rows'. Using min of %d instead", (int)ds_extent_size, (int)ROW_MIN)); + ds_extent_size = ROW_MIN; + } + + else if( ds_extent_size > ROW_MAX ) + { + Warning(Fmt("%d is not a valid value for 'rows'. Using max of %d instead", (int)ds_extent_size, (int)ROW_MAX)); + ds_extent_size = ROW_MAX; + } + + log_output = new OutputModule(*log_file, log_series, log_type, ds_extent_size); + + return true; + } + +bool DataSeries::DoInit(string path, int num_fields, const threading::Field* const * fields) + { + // We first construct an XML schema thing (and, if ds_dump_schema is + // set, dump it to path + ".ds.xml"). Assuming that goes well, we + // use that schema to build our output logfile and prepare it to be + // written to. + + // Note: compressor count must be set *BEFORE* DataSeriesSink is + // instantiated. + if( ds_num_threads < THREAD_MIN && ds_num_threads != 0 ) + { + Warning(Fmt("%d is too few threads! Using %d instead", (int)ds_num_threads, (int)THREAD_MIN)); + ds_num_threads = THREAD_MIN; + } + + if( ds_num_threads > THREAD_MAX ) + { + Warning(Fmt("%d is too many threads! 
Dropping back to %d", (int)ds_num_threads, (int)THREAD_MAX)); + ds_num_threads = THREAD_MAX; + } + + if( ds_num_threads > 0 ) + DataSeriesSink::setCompressorCount(ds_num_threads); + + for ( int i = 0; i < num_fields; i++ ) + { + const threading::Field* field = fields[i]; + SchemaValue val; + val.ds_type = GetDSFieldType(field); + val.field_name = string(field->name); + val.field_options = GetDSOptionsForType(field); + val.bro_type = field->TypeName(); + schema_list.push_back(val); + } + + string schema = BuildDSSchemaFromFieldTypes(schema_list, path); + + if( ds_dump_schema ) + { + FILE* pFile = fopen ( string(path + ".ds.xml").c_str() , "wb" ); + + if( pFile ) + { + fwrite(schema.c_str(), 1, schema.length(), pFile); + fclose(pFile); + } + + else + Error(Fmt("cannot dump schema: %s", strerror(errno))); + } + + compress_type = Extent::compress_all; + + if( ds_compression == "lzf" ) + compress_type = Extent::compress_lzf; + + else if( ds_compression == "lzo" ) + compress_type = Extent::compress_lzo; + + else if( ds_compression == "gz" ) + compress_type = Extent::compress_gz; + + else if( ds_compression == "bz2" ) + compress_type = Extent::compress_bz2; + + else if( ds_compression == "none" ) + compress_type = Extent::compress_none; + + else if( ds_compression == "any" ) + compress_type = Extent::compress_all; + + else + Warning(Fmt("%s is not a valid compression type. Valid types are: 'lzf', 'lzo', 'gz', 'bz2', 'none', 'any'. Defaulting to 'any'", ds_compression.c_str())); + + log_type = log_types.registerTypePtr(schema); + log_series.setType(log_type); + + return OpenLog(path); + } + +bool DataSeries::DoFlush() +{ + // Flushing is handled by DataSeries automatically, so this function + // doesn't do anything. + return true; +} + +void DataSeries::CloseLog() + { + for( ExtentIterator iter = extents.begin(); iter != extents.end(); ++iter ) + delete iter->second; + + extents.clear(); + + // Don't delete the file before you delete the output, or bad things + // will happen. + delete log_output; + delete log_file; + + log_output = 0; + log_file = 0; + } + +bool DataSeries::DoFinish() +{ + CloseLog(); + + return WriterBackend::DoFinish(); +} + +bool DataSeries::DoWrite(int num_fields, const threading::Field* const * fields, + threading::Value** vals) +{ + log_output->newRecord(); + + for( size_t i = 0; i < (size_t)num_fields; ++i ) + { + ExtentIterator iter = extents.find(fields[i]->name); + assert(iter != extents.end()); + + if( iter != extents.end() ) + { + GeneralField *cField = iter->second; + + if( vals[i]->present ) + cField->set(LogValueToString(vals[i])); + } + } + + return true; +} + +bool DataSeries::DoRotate(string rotated_path, double open, double close, bool terminating) +{ + // Note that if DS files are rotated too often, the aggregate log + // size will be (much) larger. + CloseLog(); + + string dsname = Path() + ".ds"; + string nname = rotated_path + ".ds"; + rename(dsname.c_str(), nname.c_str()); + + if ( ! FinishedRotation(nname, dsname, open, close, terminating) ) + { + Error(Fmt("error rotating %s to %s", dsname.c_str(), nname.c_str())); + return false; + } + + return OpenLog(Path()); +} + +bool DataSeries::DoSetBuf(bool enabled) +{ + // DataSeries is *always* buffered to some degree. This option is ignored. 
+ return true; +} + +#endif /* USE_DATASERIES */ diff --git a/src/logging/writers/DataSeries.h b/src/logging/writers/DataSeries.h new file mode 100644 index 0000000000..0d9ab67e95 --- /dev/null +++ b/src/logging/writers/DataSeries.h @@ -0,0 +1,124 @@ +// See the file "COPYING" in the main distribution directory for copyright. +// +// A binary log writer producing DataSeries output. See doc/data-series.rst +// for more information. + +#ifndef LOGGING_WRITER_DATA_SERIES_H +#define LOGGING_WRITER_DATA_SERIES_H + +#include +#include +#include +#include + +#include "../WriterBackend.h" + +namespace logging { namespace writer { + +class DataSeries : public WriterBackend { +public: + DataSeries(WriterFrontend* frontend); + ~DataSeries(); + + static WriterBackend* Instantiate(WriterFrontend* frontend) + { return new DataSeries(frontend); } + +protected: + // Overridden from WriterBackend. + + virtual bool DoInit(string path, int num_fields, + const threading::Field* const * fields); + + virtual bool DoWrite(int num_fields, const threading::Field* const* fields, + threading::Value** vals); + virtual bool DoSetBuf(bool enabled); + virtual bool DoRotate(string rotated_path, double open, + double close, bool terminating); + virtual bool DoFlush(); + virtual bool DoFinish(); + +private: + static const size_t ROW_MIN = 2048; // Minimum extent size. + static const size_t ROW_MAX = (1024 * 1024 * 100); // Maximum extent size. + static const size_t THREAD_MIN = 1; // Minimum number of compression threads that DataSeries may spawn. + static const size_t THREAD_MAX = 128; // Maximum number of compression threads that DataSeries may spawn. + static const size_t TIME_SCALE = 1000000; // Fixed-point multiplier for time values when converted to integers. + const char* TIME_UNIT() { return "microseconds"; } // DS name for time resolution when converted to integers. Must match TIME_SCALE. + + struct SchemaValue + { + string ds_type; + string bro_type; + string field_name; + string field_options; + }; + + /** + * Turns a log value into a std::string. Uses an ostringstream to do the + * heavy lifting, but we still need to switch on the type to know which value + * in the union to give to the stream for processing. + * + * @param val The value we wish to convert to a string + * @return the string value of val + */ + std::string LogValueToString(threading::Value *val); + + /** + * Takes a field type and converts it to a relevant DataSeries type. + * + * @param field We extract the type from this and convert it into a relevant DS type. + * @return String representation of type that DataSeries can understand. + */ + string GetDSFieldType(const threading::Field *field); + + /** + * Are there any options we should put into the XML schema? + * + * @param field We extract the type from this and return any options that make sense for that type. + * @return Options that can be added directly to the XML (e.g. "pack_relative=\"yes\"") + */ + std::string GetDSOptionsForType(const threading::Field *field); + + /** + * Takes a list of types, a list of names, and a title, and uses it to construct a valid DataSeries XML schema + * thing, which is then returned as a std::string + * + * @param opts std::vector of strings containing a list of options to be appended to each field (e.g. "pack_relative=yes") + * @param sTitle Name of this schema. Ideally, these schemas would be aggregated and re-used. + */ + string BuildDSSchemaFromFieldTypes(const vector& vals, string sTitle); + + /** Closes the currently open file.
*/ + void CloseLog(); + + /** Opens a new file. */ + bool OpenLog(string path); + + typedef std::map ExtentMap; + typedef ExtentMap::iterator ExtentIterator; + + // Internal DataSeries structures we need to keep track of. + vector schema_list; + ExtentTypeLibrary log_types; + ExtentType::Ptr log_type; + ExtentSeries log_series; + ExtentMap extents; + int compress_type; + + DataSeriesSink* log_file; + OutputModule* log_output; + + // Options set from the script-level. + uint64 ds_extent_size; + uint64 ds_num_threads; + string ds_compression; + bool ds_dump_schema; + bool ds_use_integer_for_time; + string ds_set_separator; +}; + +} +} + +#endif + diff --git a/src/main.cc b/src/main.cc index ff33a3859d..19910aebc5 100644 --- a/src/main.cc +++ b/src/main.cc @@ -18,6 +18,8 @@ extern "C" { } #endif +#include + extern "C" void OPENSSL_add_all_algorithms_conf(void); #include "bsd-getopt-long.h" @@ -201,6 +203,27 @@ void usage() fprintf(stderr, " $BRO_LOG_SUFFIX | ASCII log file extension (.%s)\n", logging::writer::Ascii::LogExt().c_str()); fprintf(stderr, " $BRO_PROFILER_FILE | Output file for script execution statistics (not set)\n"); + fprintf(stderr, "\n"); + fprintf(stderr, " Supported log formats: "); + + bool first = true; + list fmts = logging::Manager::SupportedFormats(); + + for ( list::const_iterator i = fmts.begin(); i != fmts.end(); ++i ) + { + if ( *i == "None" ) + // Skip, it's uninteresting. + continue; + + if ( ! first ) + fprintf(stderr, ","); + + fprintf(stderr, "%s", (*i).c_str()); + first = false; + } + + fprintf(stderr, "\n"); + exit(1); } @@ -570,8 +593,7 @@ int main(int argc, char** argv) break; case 'K': - hash_md5(strlen(optarg), (const u_char*) optarg, - shared_hmac_md5_key); + MD5((const u_char*) optarg, strlen(optarg), shared_hmac_md5_key); hmac_key_set = 1; break; diff --git a/src/md5.c b/src/md5.c deleted file mode 100644 index 888993b9c4..0000000000 --- a/src/md5.c +++ /dev/null @@ -1,380 +0,0 @@ -/* - Copyright (C) 1999, 2000, 2002 Aladdin Enterprises. All rights reserved. - - This software is provided 'as-is', without any express or implied - warranty. In no event will the authors be held liable for any damages - arising from the use of this software. - - Permission is granted to anyone to use this software for any purpose, - including commercial applications, and to alter it and redistribute it - freely, subject to the following restrictions: - - 1. The origin of this software must not be misrepresented; you must not - claim that you wrote the original software. If you use this software - in a product, an acknowledgment in the product documentation would be - appreciated but is not required. - 2. Altered source versions must be plainly marked as such, and must not be - misrepresented as being the original software. - 3. This notice may not be removed or altered from any source distribution. - - L. Peter Deutsch - ghost@aladdin.com - - */ -/* - Independent implementation of MD5 (RFC 1321). - - This code implements the MD5 Algorithm defined in RFC 1321, whose - text is available at - http://www.ietf.org/rfc/rfc1321.txt - The code is derived from the text of the RFC, including the test suite - (section A.5) but excluding the rest of Appendix A. It does not include - any code or documentation that is identified in the RFC as being - copyrighted. - - The original and principal author of md5.c is L. Peter Deutsch - . 
Other authors are noted in the change history - that follows (in reverse chronological order): - - 2002-04-13 lpd Clarified derivation from RFC 1321; now handles byte order - either statically or dynamically; added missing #include - in library. - 2002-03-11 lpd Corrected argument list for main(), and added int return - type, in test program and T value program. - 2002-02-21 lpd Added missing #include in test program. - 2000-07-03 lpd Patched to eliminate warnings about "constant is - unsigned in ANSI C, signed in traditional"; made test program - self-checking. - 1999-11-04 lpd Edited comments slightly for automatic TOC extraction. - 1999-10-18 lpd Fixed typo in header comment (ansi2knr rather than md5). - 1999-05-03 lpd Original version. - */ - -#include "md5.h" -#include - -#undef BYTE_ORDER /* 1 = big-endian, -1 = little-endian, 0 = unknown */ -#ifdef ARCH_IS_BIG_ENDIAN -# define BYTE_ORDER (ARCH_IS_BIG_ENDIAN ? 1 : -1) -#else -# define BYTE_ORDER 0 -#endif - -#define T_MASK ((md5_word_t)~0) -#define T1 /* 0xd76aa478 */ (T_MASK ^ 0x28955b87) -#define T2 /* 0xe8c7b756 */ (T_MASK ^ 0x173848a9) -#define T3 0x242070db -#define T4 /* 0xc1bdceee */ (T_MASK ^ 0x3e423111) -#define T5 /* 0xf57c0faf */ (T_MASK ^ 0x0a83f050) -#define T6 0x4787c62a -#define T7 /* 0xa8304613 */ (T_MASK ^ 0x57cfb9ec) -#define T8 /* 0xfd469501 */ (T_MASK ^ 0x02b96afe) -#define T9 0x698098d8 -#define T10 /* 0x8b44f7af */ (T_MASK ^ 0x74bb0850) -#define T11 /* 0xffff5bb1 */ (T_MASK ^ 0x0000a44e) -#define T12 /* 0x895cd7be */ (T_MASK ^ 0x76a32841) -#define T13 0x6b901122 -#define T14 /* 0xfd987193 */ (T_MASK ^ 0x02678e6c) -#define T15 /* 0xa679438e */ (T_MASK ^ 0x5986bc71) -#define T16 0x49b40821 -#define T17 /* 0xf61e2562 */ (T_MASK ^ 0x09e1da9d) -#define T18 /* 0xc040b340 */ (T_MASK ^ 0x3fbf4cbf) -#define T19 0x265e5a51 -#define T20 /* 0xe9b6c7aa */ (T_MASK ^ 0x16493855) -#define T21 /* 0xd62f105d */ (T_MASK ^ 0x29d0efa2) -#define T22 0x02441453 -#define T23 /* 0xd8a1e681 */ (T_MASK ^ 0x275e197e) -#define T24 /* 0xe7d3fbc8 */ (T_MASK ^ 0x182c0437) -#define T25 0x21e1cde6 -#define T26 /* 0xc33707d6 */ (T_MASK ^ 0x3cc8f829) -#define T27 /* 0xf4d50d87 */ (T_MASK ^ 0x0b2af278) -#define T28 0x455a14ed -#define T29 /* 0xa9e3e905 */ (T_MASK ^ 0x561c16fa) -#define T30 /* 0xfcefa3f8 */ (T_MASK ^ 0x03105c07) -#define T31 0x676f02d9 -#define T32 /* 0x8d2a4c8a */ (T_MASK ^ 0x72d5b375) -#define T33 /* 0xfffa3942 */ (T_MASK ^ 0x0005c6bd) -#define T34 /* 0x8771f681 */ (T_MASK ^ 0x788e097e) -#define T35 0x6d9d6122 -#define T36 /* 0xfde5380c */ (T_MASK ^ 0x021ac7f3) -#define T37 /* 0xa4beea44 */ (T_MASK ^ 0x5b4115bb) -#define T38 0x4bdecfa9 -#define T39 /* 0xf6bb4b60 */ (T_MASK ^ 0x0944b49f) -#define T40 /* 0xbebfbc70 */ (T_MASK ^ 0x4140438f) -#define T41 0x289b7ec6 -#define T42 /* 0xeaa127fa */ (T_MASK ^ 0x155ed805) -#define T43 /* 0xd4ef3085 */ (T_MASK ^ 0x2b10cf7a) -#define T44 0x04881d05 -#define T45 /* 0xd9d4d039 */ (T_MASK ^ 0x262b2fc6) -#define T46 /* 0xe6db99e5 */ (T_MASK ^ 0x1924661a) -#define T47 0x1fa27cf8 -#define T48 /* 0xc4ac5665 */ (T_MASK ^ 0x3b53a99a) -#define T49 /* 0xf4292244 */ (T_MASK ^ 0x0bd6ddbb) -#define T50 0x432aff97 -#define T51 /* 0xab9423a7 */ (T_MASK ^ 0x546bdc58) -#define T52 /* 0xfc93a039 */ (T_MASK ^ 0x036c5fc6) -#define T53 0x655b59c3 -#define T54 /* 0x8f0ccc92 */ (T_MASK ^ 0x70f3336d) -#define T55 /* 0xffeff47d */ (T_MASK ^ 0x00100b82) -#define T56 /* 0x85845dd1 */ (T_MASK ^ 0x7a7ba22e) -#define T57 0x6fa87e4f -#define T58 /* 0xfe2ce6e0 */ (T_MASK ^ 0x01d3191f) -#define T59 /* 0xa3014314 */ (T_MASK ^ 
0x5cfebceb) -#define T60 0x4e0811a1 -#define T61 /* 0xf7537e82 */ (T_MASK ^ 0x08ac817d) -#define T62 /* 0xbd3af235 */ (T_MASK ^ 0x42c50dca) -#define T63 0x2ad7d2bb -#define T64 /* 0xeb86d391 */ (T_MASK ^ 0x14792c6e) - - -static void -md5_process(md5_state_t *pms, const md5_byte_t *data /*[64]*/) -{ - md5_word_t - a = pms->abcd[0], b = pms->abcd[1], - c = pms->abcd[2], d = pms->abcd[3]; - md5_word_t t; -#if BYTE_ORDER > 0 - /* Define storage only for big-endian CPUs. */ - md5_word_t X[16]; -#else - /* Define storage for little-endian or both types of CPUs. */ - md5_word_t xbuf[16]; - const md5_word_t *X; -#endif - - { -#if BYTE_ORDER == 0 - /* - * Determine dynamically whether this is a big-endian or - * little-endian machine, since we can use a more efficient - * algorithm on the latter. - */ - static const int w = 1; - - if (*((const md5_byte_t *)&w)) /* dynamic little-endian */ -#endif -#if BYTE_ORDER <= 0 /* little-endian */ - { - /* - * On little-endian machines, we can process properly aligned - * data without copying it. - */ - if (!((data - (const md5_byte_t *)0) & 3)) { - /* data are properly aligned */ - X = (const md5_word_t *)data; - } else { - /* not aligned */ - memcpy(xbuf, data, 64); - X = xbuf; - } - } -#endif -#if BYTE_ORDER == 0 - else /* dynamic big-endian */ -#endif -#if BYTE_ORDER >= 0 /* big-endian */ - { - /* - * On big-endian machines, we must arrange the bytes in the - * right order. - */ - const md5_byte_t *xp = data; - int i; - -# if BYTE_ORDER == 0 - X = xbuf; /* (dynamic only) */ -# else -# define xbuf X /* (static only) */ -# endif - for (i = 0; i < 16; ++i, xp += 4) - xbuf[i] = xp[0] + (xp[1] << 8) + (xp[2] << 16) + (xp[3] << 24); - } -#endif - } - -#define ROTATE_LEFT(x, n) (((x) << (n)) | ((x) >> (32 - (n)))) - - /* Round 1. */ - /* Let [abcd k s i] denote the operation - a = b + ((a + F(b,c,d) + X[k] + T[i]) <<< s). */ -#define F(x, y, z) (((x) & (y)) | (~(x) & (z))) -#define SET(a, b, c, d, k, s, Ti)\ - t = a + F(b,c,d) + X[k] + Ti;\ - a = ROTATE_LEFT(t, s) + b - /* Do the following 16 operations. */ - SET(a, b, c, d, 0, 7, T1); - SET(d, a, b, c, 1, 12, T2); - SET(c, d, a, b, 2, 17, T3); - SET(b, c, d, a, 3, 22, T4); - SET(a, b, c, d, 4, 7, T5); - SET(d, a, b, c, 5, 12, T6); - SET(c, d, a, b, 6, 17, T7); - SET(b, c, d, a, 7, 22, T8); - SET(a, b, c, d, 8, 7, T9); - SET(d, a, b, c, 9, 12, T10); - SET(c, d, a, b, 10, 17, T11); - SET(b, c, d, a, 11, 22, T12); - SET(a, b, c, d, 12, 7, T13); - SET(d, a, b, c, 13, 12, T14); - SET(c, d, a, b, 14, 17, T15); - SET(b, c, d, a, 15, 22, T16); -#undef SET - - /* Round 2. */ - /* Let [abcd k s i] denote the operation - a = b + ((a + G(b,c,d) + X[k] + T[i]) <<< s). */ -#define G(x, y, z) (((x) & (z)) | ((y) & ~(z))) -#define SET(a, b, c, d, k, s, Ti)\ - t = a + G(b,c,d) + X[k] + Ti;\ - a = ROTATE_LEFT(t, s) + b - /* Do the following 16 operations. */ - SET(a, b, c, d, 1, 5, T17); - SET(d, a, b, c, 6, 9, T18); - SET(c, d, a, b, 11, 14, T19); - SET(b, c, d, a, 0, 20, T20); - SET(a, b, c, d, 5, 5, T21); - SET(d, a, b, c, 10, 9, T22); - SET(c, d, a, b, 15, 14, T23); - SET(b, c, d, a, 4, 20, T24); - SET(a, b, c, d, 9, 5, T25); - SET(d, a, b, c, 14, 9, T26); - SET(c, d, a, b, 3, 14, T27); - SET(b, c, d, a, 8, 20, T28); - SET(a, b, c, d, 13, 5, T29); - SET(d, a, b, c, 2, 9, T30); - SET(c, d, a, b, 7, 14, T31); - SET(b, c, d, a, 12, 20, T32); -#undef SET - - /* Round 3. */ - /* Let [abcd k s t] denote the operation - a = b + ((a + H(b,c,d) + X[k] + T[i]) <<< s). 
*/ -#define H(x, y, z) ((x) ^ (y) ^ (z)) -#define SET(a, b, c, d, k, s, Ti)\ - t = a + H(b,c,d) + X[k] + Ti;\ - a = ROTATE_LEFT(t, s) + b - /* Do the following 16 operations. */ - SET(a, b, c, d, 5, 4, T33); - SET(d, a, b, c, 8, 11, T34); - SET(c, d, a, b, 11, 16, T35); - SET(b, c, d, a, 14, 23, T36); - SET(a, b, c, d, 1, 4, T37); - SET(d, a, b, c, 4, 11, T38); - SET(c, d, a, b, 7, 16, T39); - SET(b, c, d, a, 10, 23, T40); - SET(a, b, c, d, 13, 4, T41); - SET(d, a, b, c, 0, 11, T42); - SET(c, d, a, b, 3, 16, T43); - SET(b, c, d, a, 6, 23, T44); - SET(a, b, c, d, 9, 4, T45); - SET(d, a, b, c, 12, 11, T46); - SET(c, d, a, b, 15, 16, T47); - SET(b, c, d, a, 2, 23, T48); -#undef SET - - /* Round 4. */ - /* Let [abcd k s t] denote the operation - a = b + ((a + I(b,c,d) + X[k] + T[i]) <<< s). */ -#define I(x, y, z) ((y) ^ ((x) | ~(z))) -#define SET(a, b, c, d, k, s, Ti)\ - t = a + I(b,c,d) + X[k] + Ti;\ - a = ROTATE_LEFT(t, s) + b - /* Do the following 16 operations. */ - SET(a, b, c, d, 0, 6, T49); - SET(d, a, b, c, 7, 10, T50); - SET(c, d, a, b, 14, 15, T51); - SET(b, c, d, a, 5, 21, T52); - SET(a, b, c, d, 12, 6, T53); - SET(d, a, b, c, 3, 10, T54); - SET(c, d, a, b, 10, 15, T55); - SET(b, c, d, a, 1, 21, T56); - SET(a, b, c, d, 8, 6, T57); - SET(d, a, b, c, 15, 10, T58); - SET(c, d, a, b, 6, 15, T59); - SET(b, c, d, a, 13, 21, T60); - SET(a, b, c, d, 4, 6, T61); - SET(d, a, b, c, 11, 10, T62); - SET(c, d, a, b, 2, 15, T63); - SET(b, c, d, a, 9, 21, T64); -#undef SET - - /* Then perform the following additions. (That is increment each - of the four registers by the value it had before this block - was started.) */ - pms->abcd[0] += a; - pms->abcd[1] += b; - pms->abcd[2] += c; - pms->abcd[3] += d; -} - -void -md5_init(md5_state_t *pms) -{ - pms->count[0] = pms->count[1] = 0; - pms->abcd[0] = 0x67452301; - pms->abcd[1] = /*0xefcdab89*/ T_MASK ^ 0x10325476; - pms->abcd[2] = /*0x98badcfe*/ T_MASK ^ 0x67452301; - pms->abcd[3] = 0x10325476; -} - -void -md5_append(md5_state_t *pms, const md5_byte_t *data, int nbytes) -{ - const md5_byte_t *p = data; - int left = nbytes; - int offset = (pms->count[0] >> 3) & 63; - md5_word_t nbits = (md5_word_t)(nbytes << 3); - - if (nbytes <= 0) - return; - - /* Update the message length. */ - pms->count[1] += nbytes >> 29; - pms->count[0] += nbits; - if (pms->count[0] < nbits) - pms->count[1]++; - - /* Process an initial partial block. */ - if (offset) { - int copy = (offset + nbytes > 64 ? 64 - offset : nbytes); - - memcpy(pms->buf + offset, p, copy); - if (offset + copy < 64) - return; - p += copy; - left -= copy; - md5_process(pms, pms->buf); - } - - /* Process full blocks. */ - for (; left >= 64; p += 64, left -= 64) - md5_process(pms, p); - - /* Process a final partial block. */ - if (left) - memcpy(pms->buf, p, left); -} - -void -md5_finish(md5_state_t *pms, md5_byte_t digest[16]) -{ - static const md5_byte_t pad[64] = { - 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 - }; - md5_byte_t data[8]; - int i; - - /* Save the length before padding. */ - for (i = 0; i < 8; ++i) - data[i] = (md5_byte_t)(pms->count[i >> 2] >> ((i & 3) << 3)); - /* Pad to 56 bytes mod 64. */ - md5_append(pms, pad, ((55 - (pms->count[0] >> 3)) & 63) + 1); - /* Append the length. 
*/ - md5_append(pms, data, 8); - for (i = 0; i < 16; ++i) - digest[i] = (md5_byte_t)(pms->abcd[i >> 2] >> ((i & 3) << 3)); -} diff --git a/src/md5.h b/src/md5.h deleted file mode 100644 index 2806b5b9b5..0000000000 --- a/src/md5.h +++ /dev/null @@ -1,90 +0,0 @@ -/* - Copyright (C) 1999, 2002 Aladdin Enterprises. All rights reserved. - - This software is provided 'as-is', without any express or implied - warranty. In no event will the authors be held liable for any damages - arising from the use of this software. - - Permission is granted to anyone to use this software for any purpose, - including commercial applications, and to alter it and redistribute it - freely, subject to the following restrictions: - - 1. The origin of this software must not be misrepresented; you must not - claim that you wrote the original software. If you use this software - in a product, an acknowledgment in the product documentation would be - appreciated but is not required. - 2. Altered source versions must be plainly marked as such, and must not be - misrepresented as being the original software. - 3. This notice may not be removed or altered from any source distribution. - - L. Peter Deutsch - ghost@aladdin.com - - */ -/* - Independent implementation of MD5 (RFC 1321). - - This code implements the MD5 Algorithm defined in RFC 1321, whose - text is available at - http://www.ietf.org/rfc/rfc1321.txt - The code is derived from the text of the RFC, including the test suite - (section A.5) but excluding the rest of Appendix A. It does not include - any code or documentation that is identified in the RFC as being - copyrighted. - - The original and principal author of md5.h is L. Peter Deutsch - . Other authors are noted in the change history - that follows (in reverse chronological order): - - 2002-04-13 lpd Removed support for non-ANSI compilers; removed - references to Ghostscript; clarified derivation from RFC 1321; - now handles byte order either statically or dynamically. - 1999-11-04 lpd Edited comments slightly for automatic TOC extraction. - 1999-10-18 lpd Fixed typo in header comment (ansi2knr rather than md5); - added conditionalization for C++ compilation from Martin - Purschke . - 1999-05-03 lpd Original version. - */ - -#ifndef md5_INCLUDED -# define md5_INCLUDED - -/* - * This package supports both compile-time and run-time determination of CPU - * byte order. If ARCH_IS_BIG_ENDIAN is defined as 0, the code will be - * compiled to run only on little-endian CPUs; if ARCH_IS_BIG_ENDIAN is - * defined as non-zero, the code will be compiled to run only on big-endian - * CPUs; if ARCH_IS_BIG_ENDIAN is not defined, the code will be compiled to - * run on either big- or little-endian CPUs, but will run slightly less - * efficiently on either one than if ARCH_IS_BIG_ENDIAN is defined. - */ - -typedef unsigned char md5_byte_t; /* 8-bit byte */ -typedef unsigned int md5_word_t; /* 32-bit word */ - -/* Define the state of the MD5 Algorithm. */ -typedef struct md5_state_s { - md5_word_t count[2]; /* message length in bits, lsw first */ - md5_word_t abcd[4]; /* digest buffer */ - md5_byte_t buf[64]; /* accumulate block */ -} md5_state_t; - -#ifdef __cplusplus -extern "C" -{ -#endif - -/* Initialize the algorithm. */ -void md5_init(md5_state_t *pms); - -/* Append a string to the message. */ -void md5_append(md5_state_t *pms, const md5_byte_t *data, int nbytes); - -/* Finish the message and return the digest. 
*/ -void md5_finish(md5_state_t *pms, md5_byte_t digest[16]); - -#ifdef __cplusplus -} /* end extern "C" */ -#endif - -#endif /* md5_INCLUDED */ diff --git a/src/ssl-analyzer.pac b/src/ssl-analyzer.pac index f41fb8639b..bf9cf1e0ba 100644 --- a/src/ssl-analyzer.pac +++ b/src/ssl-analyzer.pac @@ -25,6 +25,7 @@ string orig_label(bool is_orig); void free_X509(void *); X509* d2i_X509_binpac(X509** px, const uint8** in, int len); + string handshake_type_label(int type); %} %code{ @@ -46,6 +47,27 @@ string orig_label(bool is_orig) return d2i_X509(px, (u_char**) in, len); #endif } + + string handshake_type_label(int type) + { + switch ( type ) { + case HELLO_REQUEST: return string("HELLO_REQUEST"); + case CLIENT_HELLO: return string("CLIENT_HELLO"); + case SERVER_HELLO: return string("SERVER_HELLO"); + case SESSION_TICKET: return string("SESSION_TICKET"); + case CERTIFICATE: return string("CERTIFICATE"); + case SERVER_KEY_EXCHANGE: return string("SERVER_KEY_EXCHANGE"); + case CERTIFICATE_REQUEST: return string("CERTIFICATE_REQUEST"); + case SERVER_HELLO_DONE: return string("SERVER_HELLO_DONE"); + case CERTIFICATE_VERIFY: return string("CERTIFICATE_VERIFY"); + case CLIENT_KEY_EXCHANGE: return string("CLIENT_KEY_EXCHANGE"); + case FINISHED: return string("FINISHED"); + case CERTIFICATE_URL: return string("CERTIFICATE_URL"); + case CERTIFICATE_STATUS: return string("CERTIFICATE_STATUS"); + default: return string(fmt("UNKNOWN (%d)", type)); + } + } + %} @@ -88,15 +110,15 @@ refine connection SSL_Conn += { eof=0; %} - %eof{ - if ( ! eof && - state_ != STATE_CONN_ESTABLISHED && - state_ != STATE_TRACK_LOST && - state_ != STATE_INITIAL ) - bro_analyzer()->ProtocolViolation(fmt("unexpected end of connection in state %s", - state_label(state_).c_str())); - ++eof; - %} + #%eof{ + # if ( ! eof && + # state_ != STATE_CONN_ESTABLISHED && + # state_ != STATE_TRACK_LOST && + # state_ != STATE_INITIAL ) + # bro_analyzer()->ProtocolViolation(fmt("unexpected end of connection in state %s", + # state_label(state_).c_str())); + # ++eof; + #%} %cleanup{ %} @@ -133,11 +155,6 @@ refine connection SSL_Conn += { cipher_suites16 : uint16[], cipher_suites24 : uint24[]) : bool %{ - if ( state_ == STATE_TRACK_LOST ) - bro_analyzer()->ProtocolViolation(fmt("unexpected client hello message from %s in state %s", - orig_label(${rec.is_orig}).c_str(), - state_label(old_state_).c_str())); - if ( ! version_ok(version) ) bro_analyzer()->ProtocolViolation(fmt("unsupported client SSL version 0x%04x", version)); @@ -175,11 +192,6 @@ refine connection SSL_Conn += { cipher_suites24 : uint24[], comp_method : uint8) : bool %{ - if ( state_ == STATE_TRACK_LOST ) - bro_analyzer()->ProtocolViolation(fmt("unexpected server hello message from %s in state %s", - orig_label(${rec.is_orig}).c_str(), - state_label(old_state_).c_str())); - if ( ! 
version_ok(version) ) bro_analyzer()->ProtocolViolation(fmt("unsupported server SSL version 0x%04x", version)); else @@ -205,7 +217,7 @@ refine connection SSL_Conn += { return true; %} - + function proc_session_ticket_handshake(rec: SessionTicketHandshake, is_orig: bool): bool %{ if ( ssl_session_ticket_handshake ) @@ -229,11 +241,6 @@ refine connection SSL_Conn += { function proc_certificate(rec: SSLRecord, certificates : bytestring[]) : bool %{ - if ( state_ == STATE_TRACK_LOST ) - bro_analyzer()->ProtocolViolation(fmt("unexpected certificate message from %s in state %s", - orig_label(${rec.is_orig}).c_str(), - state_label(old_state_).c_str())); - if ( certificates->size() == 0 ) return true; @@ -362,6 +369,7 @@ refine connection SSL_Conn += { handshake_type_label(${hs.msg_type}).c_str(), orig_label(is_orig).c_str(), state_label(old_state_).c_str())); + return true; %} diff --git a/src/ssl-defs.pac b/src/ssl-defs.pac index 31d90338f5..b13b7c4881 100644 --- a/src/ssl-defs.pac +++ b/src/ssl-defs.pac @@ -17,35 +17,6 @@ enum ContentType { UNKNOWN_OR_V2_ENCRYPTED = 400 }; -%code{ - string* record_type_label(int type) - { - switch ( type ) { - case CHANGE_CIPHER_SPEC: - return new string("CHANGE_CIPHER_SPEC"); - case ALERT: - return new string("ALERT"); - case HANDSHAKE: - return new string("HANDSHAKE"); - case APPLICATION_DATA: - return new string("APPLICATION_DATA"); - case V2_ERROR: - return new string("V2_ERROR"); - case V2_CLIENT_HELLO: - return new string("V2_CLIENT_HELLO"); - case V2_CLIENT_MASTER_KEY: - return new string("V2_CLIENT_MASTER_KEY"); - case V2_SERVER_HELLO: - return new string("V2_SERVER_HELLO"); - case UNKNOWN_OR_V2_ENCRYPTED: - return new string("UNKNOWN_OR_V2_ENCRYPTED"); - - default: - return new string(fmt("UNEXPECTED (%d)", type)); - } - } -%} - enum SSLVersions { UNKNOWN_VERSION = 0x0000, SSLv20 = 0x0002, diff --git a/src/ssl-protocol.pac b/src/ssl-protocol.pac index 627645e4da..0019478518 100644 --- a/src/ssl-protocol.pac +++ b/src/ssl-protocol.pac @@ -23,7 +23,6 @@ type uint24 = record { string state_label(int state_nr); double get_time_from_asn1(const ASN1_TIME * atime); - string handshake_type_label(int type); %} extern type to_int; @@ -268,28 +267,6 @@ enum HandshakeType { CERTIFICATE_STATUS = 22, # RFC 3546 }; -%code{ - string handshake_type_label(int type) - { - switch ( type ) { - case HELLO_REQUEST: return string("HELLO_REQUEST"); - case CLIENT_HELLO: return string("CLIENT_HELLO"); - case SERVER_HELLO: return string("SERVER_HELLO"); - case SESSION_TICKET: return string("SESSION_TICKET"); - case CERTIFICATE: return string("CERTIFICATE"); - case SERVER_KEY_EXCHANGE: return string("SERVER_KEY_EXCHANGE"); - case CERTIFICATE_REQUEST: return string("CERTIFICATE_REQUEST"); - case SERVER_HELLO_DONE: return string("SERVER_HELLO_DONE"); - case CERTIFICATE_VERIFY: return string("CERTIFICATE_VERIFY"); - case CLIENT_KEY_EXCHANGE: return string("CLIENT_KEY_EXCHANGE"); - case FINISHED: return string("FINISHED"); - case CERTIFICATE_URL: return string("CERTIFICATE_URL"); - case CERTIFICATE_STATUS: return string("CERTIFICATE_STATUS"); - default: return string(fmt("UNKNOWN (%d)", type)); - } - } -%} - ###################################################################### # V3 Change Cipher Spec Protocol (7.1.) @@ -425,6 +402,10 @@ type ServerHello(rec: SSLRecord) = record { session_id : uint8[session_len]; cipher_suite : uint16[1]; compression_method : uint8; + # This weirdness is to deal with the possible existence or absence + # of the following fields. 
+ ext_len: uint16[] &until($element == 0 || $element != 0); + extensions : SSLExtension(rec)[] &until($input.length() == 0); } &let { state_changed : bool = $context.connection.transition(STATE_CLIENT_HELLO_RCVD, diff --git a/src/threading/Manager.h b/src/threading/Manager.h index 7d9ba766d4..ab8189f39d 100644 --- a/src/threading/Manager.h +++ b/src/threading/Manager.h @@ -77,6 +77,12 @@ public: */ int NumThreads() const { return all_threads.size(); } + /** Manually triggers processing of any thread input. This can be useful + * if the main thread is waiting for a specific message from a child. + * Usually, though, one should avoid using it. + */ + void ForceProcessing() { Process(); } + protected: friend class BasicThread; friend class MsgThread; diff --git a/src/threading/MsgThread.cc b/src/threading/MsgThread.cc index ddcd3df1dd..dd73fae154 100644 --- a/src/threading/MsgThread.cc +++ b/src/threading/MsgThread.cc @@ -10,13 +10,21 @@ namespace threading { ////// Messages. -// Signals child thread to terminate. This is actually a no-op; its only -// purpose is unblock the current read operation so that the child's Run() -// methods can check the termination status. -class TerminateMessage : public InputMessage +// Signals child thread to shutdown operation. +class FinishMessage : public InputMessage { public: - TerminateMessage(MsgThread* thread) : InputMessage("Terminate", thread) { } + FinishMessage(MsgThread* thread) : InputMessage("Finish", thread) { } + + virtual bool Process() { return Object()->DoFinish(); } +}; + +// A dummy message that's only purpose is unblock the current read operation +// so that the child's Run() methods can check the termination status. +class UnblockMessage : public InputMessage +{ +public: + UnblockMessage(MsgThread* thread) : InputMessage("Unblock", thread) { } virtual bool Process() { return true; } }; @@ -130,13 +138,29 @@ bool ReporterMessage::Process() MsgThread::MsgThread() : BasicThread() { cnt_sent_in = cnt_sent_out = 0; + finished = false; thread_mgr->AddMsgThread(this); } void MsgThread::OnStop() { - // This is to unblock the current queue read operation. - SendIn(new TerminateMessage(this), true); + // Signal thread to terminate and wait until it has acknowledged. + SendIn(new FinishMessage(this), true); + + int cnt = 0; + while ( ! finished ) + { + if ( ++cnt > 1000 ) // Insurance against broken threads ... + { + reporter->Warning("thread %s didn't finish in time", Name().c_str()); + break; + } + + usleep(1000); + } + + // One more message to make sure the current queue read operation unblocks. + SendIn(new UnblockMessage(this), true); } void MsgThread::Heartbeat() @@ -157,6 +181,14 @@ bool MsgThread::DoHeartbeat(double network_time, double current_time) return true; } +bool MsgThread::DoFinish() + { + // This is thread-safe "enough", we're the only one ever writing + // there. + finished = true; + return true; + } + void MsgThread::Info(const char* msg) { SendOut(new ReporterMessage(ReporterMessage::INFO, this, msg)); @@ -189,7 +221,9 @@ void MsgThread::InternalWarning(const char* msg) void MsgThread::InternalError(const char* msg) { - SendOut(new ReporterMessage(ReporterMessage::INTERNAL_ERROR, this, msg)); + // This one aborts immediately. 
+ fprintf(stderr, "internal error in thread: %s\n", msg); + abort(); } #ifdef DEBUG diff --git a/src/threading/MsgThread.h b/src/threading/MsgThread.h index 5ac1c0f780..cd29fe2a44 100644 --- a/src/threading/MsgThread.h +++ b/src/threading/MsgThread.h @@ -171,6 +171,8 @@ public: protected: friend class Manager; friend class HeartbeatMessage; + friend class FinishMessage; + friend class FinishedMessage; /** * Pops a message sent by the child from the child-to-main queue. @@ -215,6 +217,12 @@ protected: */ virtual bool DoHeartbeat(double network_time, double current_time); + /** Triggered for execution in the child thread just before shutting threads down. + * The child thread should finish its operations and then *must* + * call this class' implementation. + */ + virtual bool DoFinish(); + private: /** * Pops a message sent by the main thread from the main-to-chold @@ -270,6 +278,8 @@ private: uint64_t cnt_sent_in; // Counts message sent to child. uint64_t cnt_sent_out; // Counts message sent by child. + + bool finished; // Set to true by Finished message. }; /** diff --git a/src/threading/SerialTypes.cc b/src/threading/SerialTypes.cc index a5692b2ffd..5ab61b0d41 100644 --- a/src/threading/SerialTypes.cc +++ b/src/threading/SerialTypes.cc @@ -24,6 +24,20 @@ bool Field::Write(SerializationFormat* fmt) const return (fmt->Write(name, "name") && fmt->Write((int)type, "type") && fmt->Write((int)subtype, "subtype")); } +string Field::TypeName() const + { + string n = type_name(type); + + if ( (type == TYPE_TABLE) || (type == TYPE_VECTOR) ) + { + n += "["; + n += type_name(subtype); + n += "]"; + } + + return n; + } + Value::~Value() { if ( (type == TYPE_ENUM || type == TYPE_STRING || type == TYPE_FILE || type == TYPE_FUNC) diff --git a/src/threading/SerialTypes.h b/src/threading/SerialTypes.h index db7dc837bd..eee3b750fe 100644 --- a/src/threading/SerialTypes.h +++ b/src/threading/SerialTypes.h @@ -53,6 +53,12 @@ struct Field { * @return False if an error occured. */ bool Write(SerializationFormat* fmt) const; + + /** + * Returns a textual description of the field's type. This method is + * thread-safe. + */ + string TypeName() const; }; /** @@ -132,8 +138,8 @@ struct Value { /** * Returns true if the type can be represented by a Value. If - * `atomic_only` is true, will not permit composite types. - */ + * `atomic_only` is true, will not permit composite types. This + * method is thread-safe. 
*/ static bool IsCompatibleType(BroType* t, bool atomic_only=false); private: diff --git a/src/types.bif b/src/types.bif index 444c33eee9..c795e73b99 100644 --- a/src/types.bif +++ b/src/types.bif @@ -162,6 +162,7 @@ enum Writer %{ WRITER_DEFAULT, WRITER_NONE, WRITER_ASCII, + WRITER_DATASERIES, %} enum ID %{ diff --git a/src/util.cc b/src/util.cc index 856e90d156..90143923f1 100644 --- a/src/util.cc +++ b/src/util.cc @@ -27,6 +27,8 @@ #include #include #include +#include +#include #ifdef HAVE_MALLINFO # include @@ -35,7 +37,6 @@ #include "input.h" #include "util.h" #include "Obj.h" -#include "md5.h" #include "Val.h" #include "NetVar.h" #include "Net.h" @@ -546,24 +547,6 @@ bool is_dir(const char* path) return S_ISDIR(st.st_mode); } -void hash_md5(size_t size, const unsigned char* bytes, unsigned char digest[16]) - { - md5_state_s h; - md5_init(&h); - md5_append(&h, bytes, size); - md5_finish(&h, digest); - } - -const char* md5_digest_print(const unsigned char digest[16]) - { - static char digest_print[256]; - - for ( int i = 0; i < 16; ++i ) - snprintf(digest_print + i * 2, 3, "%02x", digest[i]); - - return digest_print; - } - int hmac_key_set = 0; uint8 shared_hmac_md5_key[16]; @@ -572,12 +555,12 @@ void hmac_md5(size_t size, const unsigned char* bytes, unsigned char digest[16]) if ( ! hmac_key_set ) reporter->InternalError("HMAC-MD5 invoked before the HMAC key is set"); - hash_md5(size, bytes, digest); + MD5(bytes, size, digest); for ( int i = 0; i < 16; ++i ) digest[i] ^= shared_hmac_md5_key[i]; - hash_md5(16, digest, digest); + MD5(digest, 16, digest); } static bool read_random_seeds(const char* read_file, uint32* seed, @@ -724,7 +707,7 @@ void init_random_seed(uint32 seed, const char* read_file, const char* write_file if ( ! hmac_key_set ) { - hash_md5(sizeof(buf), (u_char*) buf, shared_hmac_md5_key); + MD5((const u_char*) buf, sizeof(buf), shared_hmac_md5_key); hmac_key_set = 1; } diff --git a/src/util.h b/src/util.h index a4e3aa71b8..6b237edfd8 100644 --- a/src/util.h +++ b/src/util.h @@ -136,16 +136,12 @@ extern bool ensure_dir(const char *dirname); bool is_dir(const char* path); extern uint8 shared_hmac_md5_key[16]; -extern void hash_md5(size_t size, const unsigned char* bytes, - unsigned char digest[16]); extern int hmac_key_set; extern unsigned char shared_hmac_md5_key[16]; extern void hmac_md5(size_t size, const unsigned char* bytes, unsigned char digest[16]); -extern const char* md5_digest_print(const unsigned char digest[16]); - // Initializes RNGs for bro_random() and MD5 usage. If seed is given, then // it is used (to provide determinism). If load_file is given, the seeds // (both random & MD5) are loaded from that file. 
This takes precedence diff --git a/testing/btest/Baseline/bifs.md5/output b/testing/btest/Baseline/bifs.md5/output new file mode 100644 index 0000000000..71c0fbfcb8 --- /dev/null +++ b/testing/btest/Baseline/bifs.md5/output @@ -0,0 +1,4 @@ +f97c5d29941bfb1b2fdab0874906ab82 +7b0391feb2e0cd271f1cf39aafb4376f +f97c5d29941bfb1b2fdab0874906ab82 +7b0391feb2e0cd271f1cf39aafb4376f diff --git a/testing/btest/Baseline/bifs.sha1/output b/testing/btest/Baseline/bifs.sha1/output new file mode 100644 index 0000000000..ddcf9060b9 --- /dev/null +++ b/testing/btest/Baseline/bifs.sha1/output @@ -0,0 +1,4 @@ +fe05bcdcdc4928012781a5f1a2a77cbb5398e106 +3e949019500deb1369f13d9644d420d3a920aa5e +fe05bcdcdc4928012781a5f1a2a77cbb5398e106 +3e949019500deb1369f13d9644d420d3a920aa5e diff --git a/testing/btest/Baseline/bifs.sha256/output b/testing/btest/Baseline/bifs.sha256/output new file mode 100644 index 0000000000..5bd6a63fa4 --- /dev/null +++ b/testing/btest/Baseline/bifs.sha256/output @@ -0,0 +1,4 @@ +7692c3ad3540bb803c020b3aee66cd8887123234ea0c6e7143c0add73ff431ed +4592092e1061c7ea85af2aed194621cc17a2762bae33a79bf8ce33fd0168b801 +7692c3ad3540bb803c020b3aee66cd8887123234ea0c6e7143c0add73ff431ed +4592092e1061c7ea85af2aed194621cc17a2762bae33a79bf8ce33fd0168b801 diff --git a/testing/btest/Baseline/core.checksums/bad.out b/testing/btest/Baseline/core.checksums/bad.out index 57089a72a6..44a27f7f0f 100644 --- a/testing/btest/Baseline/core.checksums/bad.out +++ b/testing/btest/Baseline/core.checksums/bad.out @@ -1,13 +1,83 @@ -1332784981.078396 weird: bad_IP_checksum -1332784885.686428 weird: bad_TCP_checksum -1332784933.501023 weird: bad_UDP_checksum -1334075363.536871 weird: bad_ICMP_checksum -1332785210.013051 weird: routing0_hdr -1332785210.013051 weird: bad_TCP_checksum -1332782580.798420 weird: routing0_hdr -1332782580.798420 weird: bad_UDP_checksum -1334075111.800086 weird: routing0_hdr -1334075111.800086 weird: bad_ICMP_checksum -1332785250.469132 weird: bad_TCP_checksum -1332781342.923813 weird: bad_UDP_checksum -1334074939.467194 weird: bad_ICMP_checksum +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path weird +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer +#types time string addr port addr port string string bool string +1332784981.078396 - - - - - bad_IP_checksum - F bro +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path weird +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer +#types time string addr port addr port string string bool string +1332784885.686428 UWkUyAuUGXf 127.0.0.1 30000 127.0.0.1 80 bad_TCP_checksum - F bro +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path weird +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer +#types time string addr port addr port string string bool string +1332784933.501023 UWkUyAuUGXf 127.0.0.1 30000 127.0.0.1 13000 bad_UDP_checksum - F bro +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path weird +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer +#types time string addr port addr port string string bool string +1334075363.536871 UWkUyAuUGXf 192.168.1.100 8 192.168.1.101 0 bad_ICMP_checksum - F bro +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path weird +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer +#types time string addr port addr port string string bool string +1332785210.013051 
- - - - - routing0_hdr - F bro +1332785210.013051 UWkUyAuUGXf 2001:4f8:4:7:2e0:81ff:fe52:ffff 30000 2001:78:1:32::2 80 bad_TCP_checksum - F bro +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path weird +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer +#types time string addr port addr port string string bool string +1332782580.798420 - - - - - routing0_hdr - F bro +1332782580.798420 UWkUyAuUGXf 2001:4f8:4:7:2e0:81ff:fe52:ffff 30000 2001:78:1:32::2 13000 bad_UDP_checksum - F bro +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path weird +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer +#types time string addr port addr port string string bool string +1334075111.800086 - - - - - routing0_hdr - F bro +1334075111.800086 UWkUyAuUGXf 2001:4f8:4:7:2e0:81ff:fe52:ffff 128 2001:78:1:32::1 129 bad_ICMP_checksum - F bro +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path weird +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer +#types time string addr port addr port string string bool string +1332785250.469132 UWkUyAuUGXf 2001:4f8:4:7:2e0:81ff:fe52:ffff 30000 2001:4f8:4:7:2e0:81ff:fe52:9a6b 80 bad_TCP_checksum - F bro +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path weird +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer +#types time string addr port addr port string string bool string +1332781342.923813 UWkUyAuUGXf 2001:4f8:4:7:2e0:81ff:fe52:ffff 30000 2001:4f8:4:7:2e0:81ff:fe52:9a6b 13000 bad_UDP_checksum - F bro +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path weird +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer +#types time string addr port addr port string string bool string +1334074939.467194 UWkUyAuUGXf 2001:4f8:4:7:2e0:81ff:fe52:ffff 128 2001:4f8:4:7:2e0:81ff:fe52:9a6b 129 bad_ICMP_checksum - F bro diff --git a/testing/btest/Baseline/core.checksums/good.out b/testing/btest/Baseline/core.checksums/good.out index 4330967d8d..0010974b7f 100644 --- a/testing/btest/Baseline/core.checksums/good.out +++ b/testing/btest/Baseline/core.checksums/good.out @@ -1,3 +1,56 @@ -1332785125.596793 weird: routing0_hdr -1332782508.592037 weird: routing0_hdr -1334075027.053380 weird: routing0_hdr +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path weird +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer +#types time string addr port addr port string string bool string +1334074939.467194 UWkUyAuUGXf 2001:4f8:4:7:2e0:81ff:fe52:ffff 128 2001:4f8:4:7:2e0:81ff:fe52:9a6b 129 bad_ICMP_checksum - F bro +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path weird +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer +#types time string addr port addr port string string bool string +1332785125.596793 - - - - - routing0_hdr - F bro +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path weird +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer +#types time string addr port addr port string string bool string +1332782508.592037 - - - - - routing0_hdr - F bro +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path weird +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer +#types time string addr port addr port string string bool string 
+1334075027.053380 - - - - - routing0_hdr - F bro +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path weird +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer +#types time string addr port addr port string string bool string +1334075027.053380 - - - - - routing0_hdr - F bro +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path weird +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer +#types time string addr port addr port string string bool string +1334075027.053380 - - - - - routing0_hdr - F bro +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path weird +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer +#types time string addr port addr port string string bool string +1334075027.053380 - - - - - routing0_hdr - F bro diff --git a/testing/btest/Baseline/core.disable-mobile-ipv6/output b/testing/btest/Baseline/core.disable-mobile-ipv6/output deleted file mode 100644 index b156353f74..0000000000 --- a/testing/btest/Baseline/core.disable-mobile-ipv6/output +++ /dev/null @@ -1 +0,0 @@ -1333663011.602839 weird: unknown_protocol_135 diff --git a/testing/btest/Baseline/core.disable-mobile-ipv6/weird.log b/testing/btest/Baseline/core.disable-mobile-ipv6/weird.log new file mode 100644 index 0000000000..478cfe8667 --- /dev/null +++ b/testing/btest/Baseline/core.disable-mobile-ipv6/weird.log @@ -0,0 +1,8 @@ +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path weird +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer +#types time string addr port addr port string string bool string +1333663011.602839 - - - - - unknown_protocol_135 - F bro diff --git a/testing/btest/Baseline/core.file-caching-serialization/one0 b/testing/btest/Baseline/core.file-caching-serialization/one0 new file mode 100644 index 0000000000..abfe9a2af6 --- /dev/null +++ b/testing/btest/Baseline/core.file-caching-serialization/one0 @@ -0,0 +1,4 @@ +opened +write 0 +write 3 +write 6 diff --git a/testing/btest/Baseline/core.file-caching-serialization/one1 b/testing/btest/Baseline/core.file-caching-serialization/one1 new file mode 100644 index 0000000000..d53edaed28 --- /dev/null +++ b/testing/btest/Baseline/core.file-caching-serialization/one1 @@ -0,0 +1,4 @@ +opened +write 1 +write 4 +write 7 diff --git a/testing/btest/Baseline/core.file-caching-serialization/one2 b/testing/btest/Baseline/core.file-caching-serialization/one2 new file mode 100644 index 0000000000..5b5c9bc130 --- /dev/null +++ b/testing/btest/Baseline/core.file-caching-serialization/one2 @@ -0,0 +1,4 @@ +opened +write 2 +write 5 +write 8 diff --git a/testing/btest/Baseline/core.file-caching-serialization/two0 b/testing/btest/Baseline/core.file-caching-serialization/two0 new file mode 100644 index 0000000000..88e273032e --- /dev/null +++ b/testing/btest/Baseline/core.file-caching-serialization/two0 @@ -0,0 +1,6 @@ +opened +write 0 +opened +write 3 +opened +write 6 diff --git a/testing/btest/Baseline/core.file-caching-serialization/two1 b/testing/btest/Baseline/core.file-caching-serialization/two1 new file mode 100644 index 0000000000..b2f9350bc4 --- /dev/null +++ b/testing/btest/Baseline/core.file-caching-serialization/two1 @@ -0,0 +1,6 @@ +opened +write 1 +opened +write 4 +opened +write 7 diff --git a/testing/btest/Baseline/core.file-caching-serialization/two2 b/testing/btest/Baseline/core.file-caching-serialization/two2 new file mode 100644 index 
0000000000..94a971c7db --- /dev/null +++ b/testing/btest/Baseline/core.file-caching-serialization/two2 @@ -0,0 +1,6 @@ +opened +write 2 +opened +write 5 +opened +write 8 diff --git a/testing/btest/Baseline/core.icmp.icmp-context/output b/testing/btest/Baseline/core.icmp.icmp-context/output index 9e252d8c38..40dc778d8b 100644 --- a/testing/btest/Baseline/core.icmp.icmp-context/output +++ b/testing/btest/Baseline/core.icmp.icmp-context/output @@ -1,12 +1,12 @@ icmp_unreachable (code=0) conn_id: [orig_h=10.0.0.1, orig_p=3/icmp, resp_h=10.0.0.2, resp_p=0/icmp] - icmp_conn: [orig_h=10.0.0.1, resp_h=10.0.0.2, itype=3, icode=0, len=0, v6=F] + icmp_conn: [orig_h=10.0.0.1, resp_h=10.0.0.2, itype=3, icode=0, len=0, hlim=64, v6=F] icmp_context: [id=[orig_h=::, orig_p=0/unknown, resp_h=::, resp_p=0/unknown], len=0, proto=0, frag_offset=0, bad_hdr_len=T, bad_checksum=F, MF=F, DF=F] icmp_unreachable (code=0) conn_id: [orig_h=10.0.0.1, orig_p=3/icmp, resp_h=10.0.0.2, resp_p=0/icmp] - icmp_conn: [orig_h=10.0.0.1, resp_h=10.0.0.2, itype=3, icode=0, len=20, v6=F] + icmp_conn: [orig_h=10.0.0.1, resp_h=10.0.0.2, itype=3, icode=0, len=20, hlim=64, v6=F] icmp_context: [id=[orig_h=10.0.0.2, orig_p=0/unknown, resp_h=10.0.0.1, resp_p=0/unknown], len=20, proto=0, frag_offset=0, bad_hdr_len=T, bad_checksum=F, MF=F, DF=F] icmp_unreachable (code=3) conn_id: [orig_h=192.168.1.102, orig_p=3/icmp, resp_h=192.168.1.1, resp_p=3/icmp] - icmp_conn: [orig_h=192.168.1.102, resp_h=192.168.1.1, itype=3, icode=3, len=148, v6=F] + icmp_conn: [orig_h=192.168.1.102, resp_h=192.168.1.1, itype=3, icode=3, len=148, hlim=128, v6=F] icmp_context: [id=[orig_h=192.168.1.1, orig_p=53/udp, resp_h=192.168.1.102, resp_p=59207/udp], len=163, proto=2, frag_offset=0, bad_hdr_len=F, bad_checksum=F, MF=F, DF=F] diff --git a/testing/btest/Baseline/core.icmp.icmp-events/output b/testing/btest/Baseline/core.icmp.icmp-events/output index 9d8f484921..c8c8eb317f 100644 --- a/testing/btest/Baseline/core.icmp.icmp-events/output +++ b/testing/btest/Baseline/core.icmp.icmp-events/output @@ -1,20 +1,20 @@ icmp_unreachable (code=3) conn_id: [orig_h=192.168.1.102, orig_p=3/icmp, resp_h=192.168.1.1, resp_p=3/icmp] - icmp_conn: [orig_h=192.168.1.102, resp_h=192.168.1.1, itype=3, icode=3, len=148, v6=F] + icmp_conn: [orig_h=192.168.1.102, resp_h=192.168.1.1, itype=3, icode=3, len=148, hlim=128, v6=F] icmp_context: [id=[orig_h=192.168.1.1, orig_p=53/udp, resp_h=192.168.1.102, resp_p=59207/udp], len=163, proto=2, frag_offset=0, bad_hdr_len=F, bad_checksum=F, MF=F, DF=F] icmp_time_exceeded (code=0) conn_id: [orig_h=10.0.0.1, orig_p=11/icmp, resp_h=10.0.0.2, resp_p=0/icmp] - icmp_conn: [orig_h=10.0.0.1, resp_h=10.0.0.2, itype=11, icode=0, len=32, v6=F] + icmp_conn: [orig_h=10.0.0.1, resp_h=10.0.0.2, itype=11, icode=0, len=32, hlim=64, v6=F] icmp_context: [id=[orig_h=10.0.0.2, orig_p=30000/udp, resp_h=10.0.0.1, resp_p=13000/udp], len=32, proto=2, frag_offset=0, bad_hdr_len=F, bad_checksum=F, MF=F, DF=F] icmp_echo_request (id=34844, seq=0, payload=O\x85\xe0C\0^N\xeb\xff^H^I^J^K^L^M^N^O^P^Q^R^S^T^U^V^W^X^Y^Z\x1b\x1c\x1d\x1e\x1f !"#$%&'()*+,-./01234567) conn_id: [orig_h=10.0.0.1, orig_p=8/icmp, resp_h=74.125.225.99, resp_p=0/icmp] - icmp_conn: [orig_h=10.0.0.1, resp_h=74.125.225.99, itype=8, icode=0, len=56, v6=F] + icmp_conn: [orig_h=10.0.0.1, resp_h=74.125.225.99, itype=8, icode=0, len=56, hlim=64, v6=F] icmp_echo_reply (id=34844, seq=0, payload=O\x85\xe0C\0^N\xeb\xff^H^I^J^K^L^M^N^O^P^Q^R^S^T^U^V^W^X^Y^Z\x1b\x1c\x1d\x1e\x1f !"#$%&'()*+,-./01234567) conn_id: 
[orig_h=10.0.0.1, orig_p=8/icmp, resp_h=74.125.225.99, resp_p=0/icmp] - icmp_conn: [orig_h=10.0.0.1, resp_h=74.125.225.99, itype=8, icode=0, len=56, v6=F] + icmp_conn: [orig_h=10.0.0.1, resp_h=74.125.225.99, itype=8, icode=0, len=56, hlim=64, v6=F] icmp_echo_request (id=34844, seq=1, payload=O\x85\xe0D\0^N\xf0}^H^I^J^K^L^M^N^O^P^Q^R^S^T^U^V^W^X^Y^Z\x1b\x1c\x1d\x1e\x1f !"#$%&'()*+,-./01234567) conn_id: [orig_h=10.0.0.1, orig_p=8/icmp, resp_h=74.125.225.99, resp_p=0/icmp] - icmp_conn: [orig_h=10.0.0.1, resp_h=74.125.225.99, itype=8, icode=0, len=56, v6=F] + icmp_conn: [orig_h=10.0.0.1, resp_h=74.125.225.99, itype=8, icode=0, len=56, hlim=64, v6=F] icmp_echo_reply (id=34844, seq=1, payload=O\x85\xe0D\0^N\xf0}^H^I^J^K^L^M^N^O^P^Q^R^S^T^U^V^W^X^Y^Z\x1b\x1c\x1d\x1e\x1f !"#$%&'()*+,-./01234567) conn_id: [orig_h=10.0.0.1, orig_p=8/icmp, resp_h=74.125.225.99, resp_p=0/icmp] - icmp_conn: [orig_h=10.0.0.1, resp_h=74.125.225.99, itype=8, icode=0, len=56, v6=F] + icmp_conn: [orig_h=10.0.0.1, resp_h=74.125.225.99, itype=8, icode=0, len=56, hlim=64, v6=F] diff --git a/testing/btest/Baseline/core.icmp.icmp6-context/output b/testing/btest/Baseline/core.icmp.icmp6-context/output index 4b75210a18..7a83679018 100644 --- a/testing/btest/Baseline/core.icmp.icmp6-context/output +++ b/testing/btest/Baseline/core.icmp.icmp6-context/output @@ -1,16 +1,16 @@ icmp_unreachable (code=0) conn_id: [orig_h=fe80::dead, orig_p=1/icmp, resp_h=fe80::beef, resp_p=0/icmp] - icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=1, icode=0, len=0, v6=T] + icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=1, icode=0, len=0, hlim=64, v6=T] icmp_context: [id=[orig_h=::, orig_p=0/unknown, resp_h=::, resp_p=0/unknown], len=0, proto=0, frag_offset=0, bad_hdr_len=T, bad_checksum=F, MF=F, DF=F] icmp_unreachable (code=0) conn_id: [orig_h=fe80::dead, orig_p=1/icmp, resp_h=fe80::beef, resp_p=0/icmp] - icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=1, icode=0, len=40, v6=T] + icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=1, icode=0, len=40, hlim=64, v6=T] icmp_context: [id=[orig_h=fe80::beef, orig_p=0/unknown, resp_h=fe80::dead, resp_p=0/unknown], len=48, proto=0, frag_offset=0, bad_hdr_len=T, bad_checksum=F, MF=F, DF=F] icmp_unreachable (code=0) conn_id: [orig_h=fe80::dead, orig_p=1/icmp, resp_h=fe80::beef, resp_p=0/icmp] - icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=1, icode=0, len=60, v6=T] + icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=1, icode=0, len=60, hlim=64, v6=T] icmp_context: [id=[orig_h=fe80::beef, orig_p=30000/udp, resp_h=fe80::dead, resp_p=13000/udp], len=60, proto=2, frag_offset=0, bad_hdr_len=F, bad_checksum=F, MF=F, DF=F] icmp_unreachable (code=0) conn_id: [orig_h=fe80::dead, orig_p=1/icmp, resp_h=fe80::beef, resp_p=0/icmp] - icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=1, icode=0, len=48, v6=T] + icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=1, icode=0, len=48, hlim=64, v6=T] icmp_context: [id=[orig_h=fe80::beef, orig_p=0/unknown, resp_h=fe80::dead, resp_p=0/unknown], len=48, proto=0, frag_offset=0, bad_hdr_len=T, bad_checksum=F, MF=F, DF=F] diff --git a/testing/btest/Baseline/core.icmp.icmp6-events/output b/testing/btest/Baseline/core.icmp.icmp6-events/output index 1ff26ff889..81075b716a 100644 --- a/testing/btest/Baseline/core.icmp.icmp6-events/output +++ b/testing/btest/Baseline/core.icmp.icmp6-events/output @@ -1,55 +1,68 @@ icmp_unreachable (code=0) conn_id: [orig_h=fe80::dead, orig_p=1/icmp, resp_h=fe80::beef, resp_p=0/icmp] - icmp_conn: 
[orig_h=fe80::dead, resp_h=fe80::beef, itype=1, icode=0, len=60, v6=T] + icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=1, icode=0, len=60, hlim=64, v6=T] icmp_context: [id=[orig_h=fe80::beef, orig_p=30000/udp, resp_h=fe80::dead, resp_p=13000/udp], len=60, proto=2, frag_offset=0, bad_hdr_len=F, bad_checksum=F, MF=F, DF=F] icmp_packet_too_big (code=0) conn_id: [orig_h=fe80::dead, orig_p=2/icmp, resp_h=fe80::beef, resp_p=0/icmp] - icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=2, icode=0, len=52, v6=T] + icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=2, icode=0, len=52, hlim=64, v6=T] icmp_context: [id=[orig_h=fe80::beef, orig_p=30000/udp, resp_h=fe80::dead, resp_p=13000/udp], len=52, proto=2, frag_offset=0, bad_hdr_len=F, bad_checksum=F, MF=F, DF=F] icmp_time_exceeded (code=0) conn_id: [orig_h=fe80::dead, orig_p=3/icmp, resp_h=fe80::beef, resp_p=0/icmp] - icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=3, icode=0, len=52, v6=T] + icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=3, icode=0, len=52, hlim=64, v6=T] icmp_context: [id=[orig_h=fe80::beef, orig_p=30000/udp, resp_h=fe80::dead, resp_p=13000/udp], len=52, proto=2, frag_offset=0, bad_hdr_len=F, bad_checksum=F, MF=F, DF=F] icmp_parameter_problem (code=0) conn_id: [orig_h=fe80::dead, orig_p=4/icmp, resp_h=fe80::beef, resp_p=0/icmp] - icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=4, icode=0, len=52, v6=T] + icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=4, icode=0, len=52, hlim=64, v6=T] icmp_context: [id=[orig_h=fe80::beef, orig_p=30000/udp, resp_h=fe80::dead, resp_p=13000/udp], len=52, proto=2, frag_offset=0, bad_hdr_len=F, bad_checksum=F, MF=F, DF=F] icmp_echo_request (id=1, seq=3, payload=abcdefghijklmnopqrstuvwabcdefghi) conn_id: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, orig_p=128/icmp, resp_h=2001:4860:8006::63, resp_p=129/icmp] - icmp_conn: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, resp_h=2001:4860:8006::63, itype=128, icode=0, len=32, v6=T] + icmp_conn: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, resp_h=2001:4860:8006::63, itype=128, icode=0, len=32, hlim=128, v6=T] icmp_echo_reply (id=1, seq=3, payload=abcdefghijklmnopqrstuvwabcdefghi) conn_id: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, orig_p=128/icmp, resp_h=2001:4860:8006::63, resp_p=129/icmp] - icmp_conn: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, resp_h=2001:4860:8006::63, itype=128, icode=0, len=32, v6=T] + icmp_conn: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, resp_h=2001:4860:8006::63, itype=128, icode=0, len=32, hlim=128, v6=T] icmp_echo_request (id=1, seq=4, payload=abcdefghijklmnopqrstuvwabcdefghi) conn_id: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, orig_p=128/icmp, resp_h=2001:4860:8006::63, resp_p=129/icmp] - icmp_conn: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, resp_h=2001:4860:8006::63, itype=128, icode=0, len=32, v6=T] + icmp_conn: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, resp_h=2001:4860:8006::63, itype=128, icode=0, len=32, hlim=128, v6=T] icmp_echo_reply (id=1, seq=4, payload=abcdefghijklmnopqrstuvwabcdefghi) conn_id: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, orig_p=128/icmp, resp_h=2001:4860:8006::63, resp_p=129/icmp] - icmp_conn: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, resp_h=2001:4860:8006::63, itype=128, icode=0, len=32, v6=T] + icmp_conn: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, resp_h=2001:4860:8006::63, itype=128, icode=0, len=32, hlim=128, v6=T] icmp_echo_request (id=1, seq=5, payload=abcdefghijklmnopqrstuvwabcdefghi) conn_id: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, 
orig_p=128/icmp, resp_h=2001:4860:8006::63, resp_p=129/icmp] - icmp_conn: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, resp_h=2001:4860:8006::63, itype=128, icode=0, len=32, v6=T] + icmp_conn: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, resp_h=2001:4860:8006::63, itype=128, icode=0, len=32, hlim=128, v6=T] icmp_echo_reply (id=1, seq=5, payload=abcdefghijklmnopqrstuvwabcdefghi) conn_id: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, orig_p=128/icmp, resp_h=2001:4860:8006::63, resp_p=129/icmp] - icmp_conn: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, resp_h=2001:4860:8006::63, itype=128, icode=0, len=32, v6=T] + icmp_conn: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, resp_h=2001:4860:8006::63, itype=128, icode=0, len=32, hlim=128, v6=T] icmp_echo_request (id=1, seq=6, payload=abcdefghijklmnopqrstuvwabcdefghi) conn_id: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, orig_p=128/icmp, resp_h=2001:4860:8006::63, resp_p=129/icmp] - icmp_conn: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, resp_h=2001:4860:8006::63, itype=128, icode=0, len=32, v6=T] + icmp_conn: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, resp_h=2001:4860:8006::63, itype=128, icode=0, len=32, hlim=128, v6=T] icmp_echo_reply (id=1, seq=6, payload=abcdefghijklmnopqrstuvwabcdefghi) conn_id: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, orig_p=128/icmp, resp_h=2001:4860:8006::63, resp_p=129/icmp] - icmp_conn: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, resp_h=2001:4860:8006::63, itype=128, icode=0, len=32, v6=T] + icmp_conn: [orig_h=2620:0:e00:400e:d1d:db37:beb:5aac, resp_h=2001:4860:8006::63, itype=128, icode=0, len=32, hlim=128, v6=T] icmp_redirect (tgt=fe80::cafe, dest=fe80::babe) conn_id: [orig_h=fe80::dead, orig_p=137/icmp, resp_h=fe80::beef, resp_p=0/icmp] - icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=137, icode=0, len=32, v6=T] -icmp_router_advertisement (hop_limit=0, managed=F, rlifetime=1800, reachable=0.000000, retrans=0.000000) + icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=137, icode=0, len=32, hlim=255, v6=T] +icmp_router_advertisement + cur_hop_limit=13 + managed=T + other=F + home_agent=T + pref=3 + proxy=F + rsv=0 + router_lifetime=30.0 mins + reachable_time=3.0 secs 700.0 msecs + retrans_timer=1.0 sec 300.0 msecs conn_id: [orig_h=fe80::dead, orig_p=134/icmp, resp_h=fe80::beef, resp_p=133/icmp] - icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=134, icode=0, len=8, v6=T] + icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=134, icode=0, len=8, hlim=255, v6=T] icmp_neighbor_advertisement (tgt=fe80::babe) + router=T + solicited=F + override=T conn_id: [orig_h=fe80::dead, orig_p=136/icmp, resp_h=fe80::beef, resp_p=135/icmp] - icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=136, icode=0, len=16, v6=T] + icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=136, icode=0, len=16, hlim=255, v6=T] icmp_router_solicitation conn_id: [orig_h=fe80::dead, orig_p=133/icmp, resp_h=fe80::beef, resp_p=134/icmp] - icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=133, icode=0, len=0, v6=T] + icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=133, icode=0, len=0, hlim=255, v6=T] icmp_neighbor_solicitation (tgt=fe80::babe) conn_id: [orig_h=fe80::dead, orig_p=135/icmp, resp_h=fe80::beef, resp_p=136/icmp] - icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=135, icode=0, len=16, v6=T] + icmp_conn: [orig_h=fe80::dead, resp_h=fe80::beef, itype=135, icode=0, len=16, hlim=255, v6=T] diff --git a/testing/btest/Baseline/core.mobility-checksums/bad.out b/testing/btest/Baseline/core.mobility-checksums/bad.out 
index 6ea9955402..dfbd5006a9 100644 --- a/testing/btest/Baseline/core.mobility-checksums/bad.out +++ b/testing/btest/Baseline/core.mobility-checksums/bad.out @@ -1,3 +1,24 @@ -1333988844.893456 weird: bad_MH_checksum -1333995733.276730 weird: bad_TCP_checksum -1333995701.656496 weird: bad_UDP_checksum +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path weird +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer +#types time string addr port addr port string string bool string +1333988844.893456 - - - - - bad_MH_checksum - F bro +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path weird +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer +#types time string addr port addr port string string bool string +1333640536.489921 UWkUyAuUGXf 2001:78:1:32::1 30000 2001:4f8:4:7:2e0:81ff:fe52:9a6b 80 bad_TCP_checksum - F bro +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path weird +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer +#types time string addr port addr port string string bool string +1333640468.146461 UWkUyAuUGXf 2001:78:1:32::1 30000 2001:4f8:4:7:2e0:81ff:fe52:9a6b 13000 bad_UDP_checksum - F bro diff --git a/testing/btest/Baseline/core.mobility-checksums/good.out b/testing/btest/Baseline/core.mobility-checksums/good.out deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/testing/btest/Baseline/core.truncation/output b/testing/btest/Baseline/core.truncation/output index ba8d3eedee..f3d64b8b28 100644 --- a/testing/btest/Baseline/core.truncation/output +++ b/testing/btest/Baseline/core.truncation/output @@ -1,3 +1,24 @@ -1334160095.895421 weird: truncated_IP -1334156241.519125 weird: truncated_IP -1334094648.590126 weird: truncated_IP +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path weird +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer +#types time string addr port addr port string string bool string +1334160095.895421 - - - - - truncated_IP - F bro +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path weird +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer +#types time string addr port addr port string string bool string +1334156241.519125 - - - - - truncated_IP - F bro +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path weird +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p name addl notice peer +#types time string addr port addr port string string bool string +1334094648.590126 - - - - - truncated_IP - F bro diff --git a/testing/btest/Baseline/coverage.bare-load-baseline/canonified_loaded_scripts.log b/testing/btest/Baseline/coverage.bare-load-baseline/canonified_loaded_scripts.log index d43367f300..2936e3b698 100644 --- a/testing/btest/Baseline/coverage.bare-load-baseline/canonified_loaded_scripts.log +++ b/testing/btest/Baseline/coverage.bare-load-baseline/canonified_loaded_scripts.log @@ -19,4 +19,5 @@ scripts/base/init-bare.bro scripts/base/frameworks/logging/./postprocessors/./scp.bro scripts/base/frameworks/logging/./postprocessors/./sftp.bro scripts/base/frameworks/logging/./writers/ascii.bro + scripts/base/frameworks/logging/./writers/dataseries.bro scripts/policy/misc/loaded-scripts.bro diff --git a/testing/btest/Baseline/coverage.default-load-baseline/canonified_loaded_scripts.log 
b/testing/btest/Baseline/coverage.default-load-baseline/canonified_loaded_scripts.log index 88a85fc827..27846e6e82 100644 --- a/testing/btest/Baseline/coverage.default-load-baseline/canonified_loaded_scripts.log +++ b/testing/btest/Baseline/coverage.default-load-baseline/canonified_loaded_scripts.log @@ -19,6 +19,7 @@ scripts/base/init-bare.bro scripts/base/frameworks/logging/./postprocessors/./scp.bro scripts/base/frameworks/logging/./postprocessors/./sftp.bro scripts/base/frameworks/logging/./writers/ascii.bro + scripts/base/frameworks/logging/./writers/dataseries.bro scripts/base/init-default.bro scripts/base/utils/site.bro scripts/base/utils/./patterns.bro diff --git a/testing/btest/Baseline/istate.pybroccoli/bro..stdout b/testing/btest/Baseline/istate.pybroccoli/bro..stdout index 70ca69dd98..a5d20b1f2a 100644 --- a/testing/btest/Baseline/istate.pybroccoli/bro..stdout +++ b/testing/btest/Baseline/istate.pybroccoli/bro..stdout @@ -1,7 +1,7 @@ ==== atomic -10 2 -1330035434.516896 +1336411585.166009 2.0 mins F 1.5 diff --git a/testing/btest/Baseline/istate.pybroccoli/python..stdout.filtered b/testing/btest/Baseline/istate.pybroccoli/python..stdout.filtered index 5d98e2d759..a44a95bd69 100644 --- a/testing/btest/Baseline/istate.pybroccoli/python..stdout.filtered +++ b/testing/btest/Baseline/istate.pybroccoli/python..stdout.filtered @@ -1,7 +1,7 @@ ==== atomic a 1 ==== -4L -4 42 42 -1330035434.5180 +1336411585.1711 60.0 True True 3.14 @@ -14,7 +14,7 @@ True True ==== atomic a 2 ==== -10L -10 2 2 -1330035434.5169 +1336411585.1660 120.0 False False 1.5 @@ -27,7 +27,7 @@ False False ==== atomic b 2 ==== -10L -10 2 - 1330035434.5169 + 1336411585.1660 120.0 False False 1.5 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.options/ssh.ds.xml b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.options/ssh.ds.xml new file mode 100644 index 0000000000..cacc3b0ea4 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.options/ssh.ds.xml @@ -0,0 +1,16 @@ + + + + + + + + + + + + + + + + diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.rotate/out b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.rotate/out new file mode 100644 index 0000000000..1e5e1b05c6 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.rotate/out @@ -0,0 +1,290 @@ +test.2011-03-07-03-00-05.ds test 11-03-07_03.00.05 11-03-07_04.00.05 0 dataseries +test.2011-03-07-04-00-05.ds test 11-03-07_04.00.05 11-03-07_05.00.05 0 dataseries +test.2011-03-07-05-00-05.ds test 11-03-07_05.00.05 11-03-07_06.00.05 0 dataseries +test.2011-03-07-06-00-05.ds test 11-03-07_06.00.05 11-03-07_07.00.05 0 dataseries +test.2011-03-07-07-00-05.ds test 11-03-07_07.00.05 11-03-07_08.00.05 0 dataseries +test.2011-03-07-08-00-05.ds test 11-03-07_08.00.05 11-03-07_09.00.05 0 dataseries +test.2011-03-07-09-00-05.ds test 11-03-07_09.00.05 11-03-07_10.00.05 0 dataseries +test.2011-03-07-10-00-05.ds test 11-03-07_10.00.05 11-03-07_11.00.05 0 dataseries +test.2011-03-07-11-00-05.ds test 11-03-07_11.00.05 11-03-07_12.00.05 0 dataseries +test.2011-03-07-12-00-05.ds test 11-03-07_12.00.05 11-03-07_12.59.55 1 dataseries +> test.2011-03-07-03-00-05.ds +# Extent Types ... + + + + + + + + + + + + + + + + + + + + + + +# Extent, type='test' +t id.orig_h id.orig_p id.resp_h id.resp_p +1299466805.000000 10.0.0.1 20 10.0.0.2 1024 +1299470395.000000 10.0.0.2 20 10.0.0.3 0 +> test.2011-03-07-04-00-05.ds +# Extent Types ... 
+ + + + + + + + + + + + + + + + + + + + + + +# Extent, type='test' +t id.orig_h id.orig_p id.resp_h id.resp_p +1299470405.000000 10.0.0.1 20 10.0.0.2 1025 +1299473995.000000 10.0.0.2 20 10.0.0.3 1 +> test.2011-03-07-05-00-05.ds +# Extent Types ... + + + + + + + + + + + + + + + + + + + + + + +# Extent, type='test' +t id.orig_h id.orig_p id.resp_h id.resp_p +1299474005.000000 10.0.0.1 20 10.0.0.2 1026 +1299477595.000000 10.0.0.2 20 10.0.0.3 2 +> test.2011-03-07-06-00-05.ds +# Extent Types ... + + + + + + + + + + + + + + + + + + + + + + +# Extent, type='test' +t id.orig_h id.orig_p id.resp_h id.resp_p +1299477605.000000 10.0.0.1 20 10.0.0.2 1027 +1299481195.000000 10.0.0.2 20 10.0.0.3 3 +> test.2011-03-07-07-00-05.ds +# Extent Types ... + + + + + + + + + + + + + + + + + + + + + + +# Extent, type='test' +t id.orig_h id.orig_p id.resp_h id.resp_p +1299481205.000000 10.0.0.1 20 10.0.0.2 1028 +1299484795.000000 10.0.0.2 20 10.0.0.3 4 +> test.2011-03-07-08-00-05.ds +# Extent Types ... + + + + + + + + + + + + + + + + + + + + + + +# Extent, type='test' +t id.orig_h id.orig_p id.resp_h id.resp_p +1299484805.000000 10.0.0.1 20 10.0.0.2 1029 +1299488395.000000 10.0.0.2 20 10.0.0.3 5 +> test.2011-03-07-09-00-05.ds +# Extent Types ... + + + + + + + + + + + + + + + + + + + + + + +# Extent, type='test' +t id.orig_h id.orig_p id.resp_h id.resp_p +1299488405.000000 10.0.0.1 20 10.0.0.2 1030 +1299491995.000000 10.0.0.2 20 10.0.0.3 6 +> test.2011-03-07-10-00-05.ds +# Extent Types ... + + + + + + + + + + + + + + + + + + + + + + +# Extent, type='test' +t id.orig_h id.orig_p id.resp_h id.resp_p +1299492005.000000 10.0.0.1 20 10.0.0.2 1031 +1299495595.000000 10.0.0.2 20 10.0.0.3 7 +> test.2011-03-07-11-00-05.ds +# Extent Types ... + + + + + + + + + + + + + + + + + + + + + + +# Extent, type='test' +t id.orig_h id.orig_p id.resp_h id.resp_p +1299495605.000000 10.0.0.1 20 10.0.0.2 1032 +1299499195.000000 10.0.0.2 20 10.0.0.3 8 +> test.2011-03-07-12-00-05.ds +# Extent Types ... + + + + + + + + + + + + + + + + + + + + + + +# Extent, type='test' +t id.orig_h id.orig_p id.resp_h id.resp_p +1299499205.000000 10.0.0.1 20 10.0.0.2 1033 +1299502795.000000 10.0.0.2 20 10.0.0.3 9 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.test-logging/ssh.ds.txt b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.test-logging/ssh.ds.txt new file mode 100644 index 0000000000..e9640dfd9d --- /dev/null +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.test-logging/ssh.ds.txt @@ -0,0 +1,34 @@ +# Extent Types ... + + + + + + + + + + + + + + + + + + + + + + + + + + +# Extent, type='ssh' +t id.orig_h id.orig_p id.resp_h id.resp_p status country +1337216256.956476 1.2.3.4 1234 2.3.4.5 80 success unknown +1337216256.956476 1.2.3.4 1234 2.3.4.5 80 failure US +1337216256.956476 1.2.3.4 1234 2.3.4.5 80 failure UK +1337216256.956476 1.2.3.4 1234 2.3.4.5 80 success BR +1337216256.956476 1.2.3.4 1234 2.3.4.5 80 failure MX diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.time-as-int/conn.ds.txt b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.time-as-int/conn.ds.txt new file mode 100644 index 0000000000..1d7cba3b3c --- /dev/null +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.time-as-int/conn.ds.txt @@ -0,0 +1,87 @@ +# Extent Types ... 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +# Extent, type='conn' +ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes +1300475167096535 UWkUyAuUGXf 141.142.220.202 5353 224.0.0.251 5353 udp dns 0 0 0 S0 F 0 D 1 73 0 0 +1300475167097012 arKYeMETxOg fe80::217:f2ff:fed7:cf65 5353 ff02::fb 5353 udp 0 0 0 S0 F 0 D 1 199 0 0 +1300475167099816 k6kgXLOoSKl 141.142.220.50 5353 224.0.0.251 5353 udp 0 0 0 S0 F 0 D 1 179 0 0 +1300475168853899 TEfuqmmG4bh 141.142.220.118 43927 141.142.2.2 53 udp dns 435 0 89 SHR F 0 Cd 0 0 1 117 +1300475168854378 FrJExwHcSal 141.142.220.118 37676 141.142.2.2 53 udp dns 420 0 99 SHR F 0 Cd 0 0 1 127 +1300475168854837 5OKnoww6xl4 141.142.220.118 40526 141.142.2.2 53 udp dns 391 0 183 SHR F 0 Cd 0 0 1 211 +1300475168857956 3PKsZ2Uye21 141.142.220.118 32902 141.142.2.2 53 udp dns 317 0 89 SHR F 0 Cd 0 0 1 117 +1300475168858306 VW0XPVINV8a 141.142.220.118 59816 141.142.2.2 53 udp dns 343 0 99 SHR F 0 Cd 0 0 1 127 +1300475168858713 fRFu0wcOle6 141.142.220.118 59714 141.142.2.2 53 udp dns 375 0 183 SHR F 0 Cd 0 0 1 211 +1300475168891644 qSsw6ESzHV4 141.142.220.118 58206 141.142.2.2 53 udp dns 339 0 89 SHR F 0 Cd 0 0 1 117 +1300475168892037 iE6yhOq3SF 141.142.220.118 38911 141.142.2.2 53 udp dns 334 0 99 SHR F 0 Cd 0 0 1 127 +1300475168892414 GSxOnSLghOa 141.142.220.118 59746 141.142.2.2 53 udp dns 420 0 183 SHR F 0 Cd 0 0 1 211 +1300475168893988 qCaWGmzFtM5 141.142.220.118 45000 141.142.2.2 53 udp dns 384 0 89 SHR F 0 Cd 0 0 1 117 +1300475168894422 70MGiRM1Qf4 141.142.220.118 48479 141.142.2.2 53 udp dns 316 0 99 SHR F 0 Cd 0 0 1 127 +1300475168894787 h5DsfNtYzi1 141.142.220.118 48128 141.142.2.2 53 udp dns 422 0 183 SHR F 0 Cd 0 0 1 211 +1300475168901749 P654jzLoe3a 141.142.220.118 56056 141.142.2.2 53 udp dns 402 0 131 SHR F 0 Cd 0 0 1 159 +1300475168902195 Tw8jXtpTGu6 141.142.220.118 55092 141.142.2.2 53 udp dns 374 0 198 SHR F 0 Cd 0 0 1 226 +1300475169899438 BWaU4aSuwkc 141.142.220.44 5353 224.0.0.251 5353 udp dns 0 0 0 S0 F 0 D 1 85 0 0 +1300475170862384 10XodEwRycf 141.142.220.226 137 141.142.220.255 137 udp dns 2613016 350 0 S0 F 0 D 7 546 0 0 +1300475171675372 zno26fFZkrh fe80::3074:17d5:2052:c324 65373 ff02::1:3 5355 udp dns 100096 66 0 S0 F 0 D 2 162 0 0 +1300475171677081 v5rgkJBig5l 141.142.220.226 55131 224.0.0.252 5355 udp dns 100020 66 0 S0 F 0 D 2 122 0 0 +1300475173116749 eWZCH7OONC1 fe80::3074:17d5:2052:c324 54213 ff02::1:3 5355 udp dns 99801 66 0 S0 F 0 D 2 162 0 0 +1300475173117362 0Pwk3ntf8O3 141.142.220.226 55671 224.0.0.252 5355 udp dns 99848 66 0 S0 F 0 D 2 122 0 0 +1300475173153679 0HKorjr8Zp7 141.142.220.238 56641 141.142.220.255 137 udp dns 0 0 0 S0 F 0 D 1 78 0 0 +1300475168859163 GvmoxJFXdTa 141.142.220.118 49998 208.80.152.3 80 tcp 215893 1130 734 S1 F 1130 ShACad 4 216 4 950 +1300475168652003 nQcgTWjvg4c 141.142.220.118 35634 208.80.152.2 80 tcp 61328 0 350 OTH F 0 CdA 1 52 1 402 +1300475168895267 UfGkYA2HI2g 141.142.220.118 50001 208.80.152.3 80 tcp 227283 1178 734 S1 F 1178 ShACad 4 216 4 950 +1300475168902635 i2rO3KD1Syg 141.142.220.118 35642 208.80.152.2 80 tcp 120040 534 412 S1 F 534 ShACad 3 164 3 576 +1300475168892936 0Q4FH8sESw5 141.142.220.118 50000 208.80.152.3 80 tcp 229603 1148 734 S1 F 1148 ShACad 4 216 4 950 +1300475168855305 EAr0uf4mhq 141.142.220.118 49996 208.80.152.3 80 tcp 218501 1171 733 S1 F 1171 ShACad 4 216 4 949 +1300475168892913 
slFea8xwSmb 141.142.220.118 49999 208.80.152.3 80 tcp 220960 1137 733 S1 F 1137 ShACad 4 216 4 949 +1300475169780331 2cx26uAvUPl 141.142.220.235 6705 173.192.163.128 80 tcp 0 0 0 OTH F 0 h 0 0 1 48 +1300475168724007 j4u32Pc5bif 141.142.220.118 48649 208.80.152.118 80 tcp 119904 525 232 S1 F 525 ShACad 3 164 3 396 +1300475168855330 c4Zw9TmAE05 141.142.220.118 49997 208.80.152.3 80 tcp 219720 1125 734 S1 F 1125 ShACad 4 216 4 950 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.wikipedia/conn.ds.txt b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.wikipedia/conn.ds.txt new file mode 100644 index 0000000000..3cafa078de --- /dev/null +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.wikipedia/conn.ds.txt @@ -0,0 +1,87 @@ +# Extent Types ... + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +# Extent, type='conn' +ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes +1300475167.096535 UWkUyAuUGXf 141.142.220.202 5353 224.0.0.251 5353 udp dns 0.000000 0 0 S0 F 0 D 1 73 0 0 +1300475167.097012 arKYeMETxOg fe80::217:f2ff:fed7:cf65 5353 ff02::fb 5353 udp 0.000000 0 0 S0 F 0 D 1 199 0 0 +1300475167.099816 k6kgXLOoSKl 141.142.220.50 5353 224.0.0.251 5353 udp 0.000000 0 0 S0 F 0 D 1 179 0 0 +1300475168.853899 TEfuqmmG4bh 141.142.220.118 43927 141.142.2.2 53 udp dns 0.000435 0 89 SHR F 0 Cd 0 0 1 117 +1300475168.854378 FrJExwHcSal 141.142.220.118 37676 141.142.2.2 53 udp dns 0.000420 0 99 SHR F 0 Cd 0 0 1 127 +1300475168.854837 5OKnoww6xl4 141.142.220.118 40526 141.142.2.2 53 udp dns 0.000392 0 183 SHR F 0 Cd 0 0 1 211 +1300475168.857956 3PKsZ2Uye21 141.142.220.118 32902 141.142.2.2 53 udp dns 0.000317 0 89 SHR F 0 Cd 0 0 1 117 +1300475168.858306 VW0XPVINV8a 141.142.220.118 59816 141.142.2.2 53 udp dns 0.000343 0 99 SHR F 0 Cd 0 0 1 127 +1300475168.858713 fRFu0wcOle6 141.142.220.118 59714 141.142.2.2 53 udp dns 0.000375 0 183 SHR F 0 Cd 0 0 1 211 +1300475168.891644 qSsw6ESzHV4 141.142.220.118 58206 141.142.2.2 53 udp dns 0.000339 0 89 SHR F 0 Cd 0 0 1 117 +1300475168.892037 iE6yhOq3SF 141.142.220.118 38911 141.142.2.2 53 udp dns 0.000335 0 99 SHR F 0 Cd 0 0 1 127 +1300475168.892414 GSxOnSLghOa 141.142.220.118 59746 141.142.2.2 53 udp dns 0.000421 0 183 SHR F 0 Cd 0 0 1 211 +1300475168.893988 qCaWGmzFtM5 141.142.220.118 45000 141.142.2.2 53 udp dns 0.000384 0 89 SHR F 0 Cd 0 0 1 117 +1300475168.894422 70MGiRM1Qf4 141.142.220.118 48479 141.142.2.2 53 udp dns 0.000317 0 99 SHR F 0 Cd 0 0 1 127 +1300475168.894787 h5DsfNtYzi1 141.142.220.118 48128 141.142.2.2 53 udp dns 0.000423 0 183 SHR F 0 Cd 0 0 1 211 +1300475168.901749 P654jzLoe3a 141.142.220.118 56056 141.142.2.2 53 udp dns 0.000402 0 131 SHR F 0 Cd 0 0 1 159 +1300475168.902195 Tw8jXtpTGu6 141.142.220.118 55092 141.142.2.2 53 udp dns 0.000374 0 198 SHR F 0 Cd 0 0 1 226 +1300475169.899438 BWaU4aSuwkc 141.142.220.44 5353 224.0.0.251 5353 udp dns 0.000000 0 0 S0 F 0 D 1 85 0 0 +1300475170.862384 10XodEwRycf 141.142.220.226 137 141.142.220.255 137 udp dns 2.613017 350 0 S0 F 0 D 7 546 0 0 +1300475171.675372 zno26fFZkrh fe80::3074:17d5:2052:c324 65373 ff02::1:3 5355 udp dns 0.100096 66 0 S0 F 0 D 2 162 0 0 +1300475171.677081 v5rgkJBig5l 141.142.220.226 55131 224.0.0.252 5355 udp dns 0.100021 66 0 S0 F 0 D 2 122 0 0 +1300475173.116749 eWZCH7OONC1 fe80::3074:17d5:2052:c324 54213 ff02::1:3 5355 udp dns 0.099801 66 
0 S0 F 0 D 2 162 0 0 +1300475173.117362 0Pwk3ntf8O3 141.142.220.226 55671 224.0.0.252 5355 udp dns 0.099849 66 0 S0 F 0 D 2 122 0 0 +1300475173.153679 0HKorjr8Zp7 141.142.220.238 56641 141.142.220.255 137 udp dns 0.000000 0 0 S0 F 0 D 1 78 0 0 +1300475168.859163 GvmoxJFXdTa 141.142.220.118 49998 208.80.152.3 80 tcp 0.215893 1130 734 S1 F 1130 ShACad 4 216 4 950 +1300475168.652003 nQcgTWjvg4c 141.142.220.118 35634 208.80.152.2 80 tcp 0.061329 0 350 OTH F 0 CdA 1 52 1 402 +1300475168.895267 UfGkYA2HI2g 141.142.220.118 50001 208.80.152.3 80 tcp 0.227284 1178 734 S1 F 1178 ShACad 4 216 4 950 +1300475168.902635 i2rO3KD1Syg 141.142.220.118 35642 208.80.152.2 80 tcp 0.120041 534 412 S1 F 534 ShACad 3 164 3 576 +1300475168.892936 0Q4FH8sESw5 141.142.220.118 50000 208.80.152.3 80 tcp 0.229603 1148 734 S1 F 1148 ShACad 4 216 4 950 +1300475168.855305 EAr0uf4mhq 141.142.220.118 49996 208.80.152.3 80 tcp 0.218501 1171 733 S1 F 1171 ShACad 4 216 4 949 +1300475168.892913 slFea8xwSmb 141.142.220.118 49999 208.80.152.3 80 tcp 0.220961 1137 733 S1 F 1137 ShACad 4 216 4 949 +1300475169.780331 2cx26uAvUPl 141.142.220.235 6705 173.192.163.128 80 tcp 0.000000 0 0 OTH F 0 h 0 0 1 48 +1300475168.724007 j4u32Pc5bif 141.142.220.118 48649 208.80.152.118 80 tcp 0.119905 525 232 S1 F 525 ShACad 3 164 3 396 +1300475168.855330 c4Zw9TmAE05 141.142.220.118 49997 208.80.152.3 80 tcp 0.219720 1125 734 S1 F 1125 ShACad 4 216 4 950 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.wikipedia/http.ds.txt b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.wikipedia/http.ds.txt new file mode 100644 index 0000000000..adb7bb3f7b --- /dev/null +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.wikipedia/http.ds.txt @@ -0,0 +1,81 @@ +# Extent Types ... 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +# Extent, type='http' +ts uid id.orig_h id.orig_p id.resp_h id.resp_p trans_depth method host uri referrer user_agent request_body_len response_body_len status_code status_msg info_code info_msg filename tags username password proxied mime_type md5 extraction_file +1300475168.843894 j4u32Pc5bif 141.142.220.118 48649 208.80.152.118 80 0 0 0 304 Not Modified 0 +1300475168.975800 c4Zw9TmAE05 141.142.220.118 49997 208.80.152.3 80 0 0 0 304 Not Modified 0 +1300475168.976327 EAr0uf4mhq 141.142.220.118 49996 208.80.152.3 80 0 0 0 304 Not Modified 0 +1300475168.979160 GvmoxJFXdTa 141.142.220.118 49998 208.80.152.3 80 0 0 0 304 Not Modified 0 +1300475169.012666 0Q4FH8sESw5 141.142.220.118 50000 208.80.152.3 80 0 0 0 304 Not Modified 0 +1300475169.012730 slFea8xwSmb 141.142.220.118 49999 208.80.152.3 80 0 0 0 304 Not Modified 0 +1300475169.014860 UfGkYA2HI2g 141.142.220.118 50001 208.80.152.3 80 0 0 0 304 Not Modified 0 +1300475169.022665 i2rO3KD1Syg 141.142.220.118 35642 208.80.152.2 80 0 0 0 304 Not Modified 0 +1300475169.036294 c4Zw9TmAE05 141.142.220.118 49997 208.80.152.3 80 0 0 0 304 Not Modified 0 +1300475169.036798 EAr0uf4mhq 141.142.220.118 49996 208.80.152.3 80 0 0 0 304 Not Modified 0 +1300475169.039923 GvmoxJFXdTa 141.142.220.118 49998 208.80.152.3 80 0 0 0 304 Not Modified 0 +1300475169.074793 0Q4FH8sESw5 141.142.220.118 50000 208.80.152.3 80 0 0 0 304 Not Modified 0 +1300475169.074938 slFea8xwSmb 141.142.220.118 49999 208.80.152.3 80 0 0 0 304 Not Modified 0 +1300475169.075065 UfGkYA2HI2g 141.142.220.118 50001 208.80.152.3 80 0 0 0 304 Not Modified 0 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.path-func-column-demote/local.log b/testing/btest/Baseline/scripts.base.frameworks.logging.path-func-column-demote/local.log index 291909b80a..c2c69f3153 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.path-func-column-demote/local.log +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.path-func-column-demote/local.log @@ -5,15 +5,15 @@ #path local #fields ts id.orig_h #types time addr -1300475168.855330 141.142.220.118 +1300475168.859163 141.142.220.118 1300475168.652003 141.142.220.118 1300475168.895267 141.142.220.118 +1300475168.902635 141.142.220.118 +1300475168.892936 141.142.220.118 1300475168.855305 141.142.220.118 -1300475168.859163 141.142.220.118 1300475168.892913 141.142.220.118 1300475168.724007 141.142.220.118 -1300475168.892936 141.142.220.118 -1300475168.902635 141.142.220.118 +1300475168.855330 141.142.220.118 1300475168.891644 141.142.220.118 1300475170.862384 141.142.220.226 1300475168.853899 141.142.220.118 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.rotate-custom/.stderr b/testing/btest/Baseline/scripts.base.frameworks.logging.rotate-custom/.stderr index 0954137b7e..e1958d67ad 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.rotate-custom/.stderr +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.rotate-custom/.stderr @@ -1,10 +1,10 @@ -1st test.2011-03-07-03-00-05.log test 11-03-07_03.00.05 11-03-07_04.00.05 0 -1st test.2011-03-07-04-00-05.log test 11-03-07_04.00.05 11-03-07_05.00.05 0 -1st test.2011-03-07-05-00-05.log test 11-03-07_05.00.05 11-03-07_06.00.05 0 -1st test.2011-03-07-06-00-05.log test 11-03-07_06.00.05 11-03-07_07.00.05 0 -1st test.2011-03-07-07-00-05.log test 11-03-07_07.00.05 11-03-07_08.00.05 0 -1st 
test.2011-03-07-08-00-05.log test 11-03-07_08.00.05 11-03-07_09.00.05 0 -1st test.2011-03-07-09-00-05.log test 11-03-07_09.00.05 11-03-07_10.00.05 0 -1st test.2011-03-07-10-00-05.log test 11-03-07_10.00.05 11-03-07_11.00.05 0 -1st test.2011-03-07-11-00-05.log test 11-03-07_11.00.05 11-03-07_12.00.05 0 -1st test.2011-03-07-12-00-05.log test 11-03-07_12.00.05 11-03-07_12.59.55 1 +1st test.2011-03-07-03-00-05.log test 11-03-07_03.00.05 11-03-07_04.00.05 0 ascii +1st test.2011-03-07-04-00-05.log test 11-03-07_04.00.05 11-03-07_05.00.05 0 ascii +1st test.2011-03-07-05-00-05.log test 11-03-07_05.00.05 11-03-07_06.00.05 0 ascii +1st test.2011-03-07-06-00-05.log test 11-03-07_06.00.05 11-03-07_07.00.05 0 ascii +1st test.2011-03-07-07-00-05.log test 11-03-07_07.00.05 11-03-07_08.00.05 0 ascii +1st test.2011-03-07-08-00-05.log test 11-03-07_08.00.05 11-03-07_09.00.05 0 ascii +1st test.2011-03-07-09-00-05.log test 11-03-07_09.00.05 11-03-07_10.00.05 0 ascii +1st test.2011-03-07-10-00-05.log test 11-03-07_10.00.05 11-03-07_11.00.05 0 ascii +1st test.2011-03-07-11-00-05.log test 11-03-07_11.00.05 11-03-07_12.00.05 0 ascii +1st test.2011-03-07-12-00-05.log test 11-03-07_12.00.05 11-03-07_12.59.55 1 ascii diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.rotate/out b/testing/btest/Baseline/scripts.base.frameworks.logging.rotate/out index d31783edc4..c335b5eeb9 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.rotate/out +++ b/testing/btest/Baseline/scripts.base.frameworks.logging.rotate/out @@ -1,13 +1,13 @@ -test.2011-03-07-03-00-05.log test 11-03-07_03.00.05 11-03-07_04.00.05 0 -test.2011-03-07-04-00-05.log test 11-03-07_04.00.05 11-03-07_05.00.05 0 -test.2011-03-07-05-00-05.log test 11-03-07_05.00.05 11-03-07_06.00.05 0 -test.2011-03-07-06-00-05.log test 11-03-07_06.00.05 11-03-07_07.00.05 0 -test.2011-03-07-07-00-05.log test 11-03-07_07.00.05 11-03-07_08.00.05 0 -test.2011-03-07-08-00-05.log test 11-03-07_08.00.05 11-03-07_09.00.05 0 -test.2011-03-07-09-00-05.log test 11-03-07_09.00.05 11-03-07_10.00.05 0 -test.2011-03-07-10-00-05.log test 11-03-07_10.00.05 11-03-07_11.00.05 0 -test.2011-03-07-11-00-05.log test 11-03-07_11.00.05 11-03-07_12.00.05 0 -test.2011-03-07-12-00-05.log test 11-03-07_12.00.05 11-03-07_12.59.55 1 +test.2011-03-07-03-00-05.log test 11-03-07_03.00.05 11-03-07_04.00.05 0 ascii +test.2011-03-07-04-00-05.log test 11-03-07_04.00.05 11-03-07_05.00.05 0 ascii +test.2011-03-07-05-00-05.log test 11-03-07_05.00.05 11-03-07_06.00.05 0 ascii +test.2011-03-07-06-00-05.log test 11-03-07_06.00.05 11-03-07_07.00.05 0 ascii +test.2011-03-07-07-00-05.log test 11-03-07_07.00.05 11-03-07_08.00.05 0 ascii +test.2011-03-07-08-00-05.log test 11-03-07_08.00.05 11-03-07_09.00.05 0 ascii +test.2011-03-07-09-00-05.log test 11-03-07_09.00.05 11-03-07_10.00.05 0 ascii +test.2011-03-07-10-00-05.log test 11-03-07_10.00.05 11-03-07_11.00.05 0 ascii +test.2011-03-07-11-00-05.log test 11-03-07_11.00.05 11-03-07_12.00.05 0 ascii +test.2011-03-07-12-00-05.log test 11-03-07_12.00.05 11-03-07_12.59.55 1 ascii > test.2011-03-07-03-00-05.log #separator \x09 #set_separator , diff --git a/testing/btest/Baseline/scripts.base.protocols.conn.contents-default-extract/contents_[2001:470:1f11:81f:c999:d94:aa7c:2e3e]:49185-[2001:470:4867:99::21]:21_orig.dat b/testing/btest/Baseline/scripts.base.protocols.conn.contents-default-extract/contents_[2001:470:1f11:81f:c999:d94:aa7c:2e3e]:49185-[2001:470:4867:99::21]:21_orig.dat new file mode 100644 index 0000000000..056ab8a44c --- /dev/null +++ 
b/testing/btest/Baseline/scripts.base.protocols.conn.contents-default-extract/contents_[2001:470:1f11:81f:c999:d94:aa7c:2e3e]:49185-[2001:470:4867:99::21]:21_orig.dat @@ -0,0 +1,22 @@ +USER anonymous +PASS test +SYST +FEAT +PWD +EPSV +LIST +EPSV +NLST +TYPE I +SIZE robots.txt +EPSV +RETR robots.txt +MDTM robots.txt +SIZE robots.txt +EPRT |2|2001:470:1f11:81f:c999:d94:aa7c:2e3e|49189| +RETR robots.txt +MDTM robots.txt +TYPE A +EPRT |2|2001:470:1f11:81f:c999:d94:aa7c:2e3e|49190| +LIST +QUIT diff --git a/testing/btest/Baseline/scripts.base.protocols.conn.contents-default-extract/contents_[2001:470:1f11:81f:c999:d94:aa7c:2e3e]:49185-[2001:470:4867:99::21]:21_resp.dat b/testing/btest/Baseline/scripts.base.protocols.conn.contents-default-extract/contents_[2001:470:1f11:81f:c999:d94:aa7c:2e3e]:49185-[2001:470:4867:99::21]:21_resp.dat new file mode 100644 index 0000000000..05fe8b57d8 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.protocols.conn.contents-default-extract/contents_[2001:470:1f11:81f:c999:d94:aa7c:2e3e]:49185-[2001:470:4867:99::21]:21_resp.dat @@ -0,0 +1,73 @@ +220 ftp.NetBSD.org FTP server (NetBSD-ftpd 20100320) ready. +331 Guest login ok, type your name as password. +230- + The NetBSD Project FTP Server located in Redwood City, CA, USA + 1 Gbps connectivity courtesy of , , + Internet Systems Consortium WELCOME! /( )` + \ \___ / | + +--- Currently Supported Platforms ----+ /- _ `-/ ' + | acorn[26,32], algor, alpha, amd64, | (/\/ \ \ /\ + | amiga[,ppc], arc, atari, bebox, | / / | ` \ + | cats, cesfic, cobalt, dreamcast, | O O ) / | + | evb[arm,mips,ppc,sh3], hp[300,700], | `-^--'`< ' + | hpc[arm,mips,sh], i386, | (_.) _ ) / + | ibmnws, iyonix, luna68k, | .___/` / + | mac[m68k,ppc], mipsco, mmeye, | `-----' / + | mvme[m68k,ppc], netwinders, | <----. __ / __ \ + | news[m68k,mips], next68k, ofppc, | <----|====O)))==) \) /==== + | playstation2, pmax, prep, sandpoint, | <----' `--' `.__,' \ + | sbmips, sgimips, shark, sparc[,64], | | | + | sun[2,3], vax, x68k, xen | \ / + +--------------------------------------+ ______( (_ / \_____ + See our website at http://www.NetBSD.org/ ,' ,-----' | \ + We log all FTP transfers and commands. `--{__________) (FL) \/ +230- + EXPORT NOTICE + + Please note that portions of this FTP site contain cryptographic + software controlled under the Export Administration Regulations (EAR). + + None of this software may be downloaded or otherwise exported or + re-exported into (or to a national or resident of) Cuba, Iran, Libya, + Sudan, North Korea, Syria or any other country to which the U.S. has + embargoed goods. + + By downloading or using said software, you are agreeing to the + foregoing and you are representing and warranting that you are not + located in, under the control of, or a national or resident of any + such country or on any such list. +230 Guest login ok, access restrictions apply. +215 UNIX Type: L8 Version: NetBSD-ftpd 20100320 +211-Features supported + MDTM + MLST Type*;Size*;Modify*;Perm*;Unique*; + REST STREAM + SIZE + TVFS +211 End +257 "/" is the current directory. +229 Entering Extended Passive Mode (|||57086|) +150 Opening ASCII mode data connection for '/bin/ls'. +226 Transfer complete. +229 Entering Extended Passive Mode (|||57087|) +150 Opening ASCII mode data connection for 'file list'. +226 Transfer complete. +200 Type set to I. +213 77 +229 Entering Extended Passive Mode (|||57088|) +150 Opening BINARY mode data connection for 'robots.txt' (77 bytes). +226 Transfer complete. 
+213 20090816112038 +213 77 +200 EPRT command successful. +150 Opening BINARY mode data connection for 'robots.txt' (77 bytes). +226 Transfer complete. +213 20090816112038 +200 Type set to A. +200 EPRT command successful. +150 Opening ASCII mode data connection for '/bin/ls'. +226 Transfer complete. +221- + Data traffic for this session was 154 bytes in 2 files. + Total traffic for this session was 4512 bytes in 5 transfers. +221 Thank you for using the FTP service on ftp.NetBSD.org. diff --git a/testing/btest/Baseline/scripts.base.protocols.ssl.basic/ssl.log b/testing/btest/Baseline/scripts.base.protocols.ssl.basic/ssl.log new file mode 100644 index 0000000000..74156362e5 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.protocols.ssl.basic/ssl.log @@ -0,0 +1,8 @@ +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path ssl +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p version cipher server_name session_id subject issuer_subject not_valid_before not_valid_after last_alert +#types time string addr port addr port string string string string string string time time string +1335538392.319381 UWkUyAuUGXf 192.168.1.105 62045 74.125.224.79 443 TLSv10 TLS_ECDHE_RSA_WITH_RC4_128_SHA ssl.gstatic.com - CN=*.gstatic.com,O=Google Inc,L=Mountain View,ST=California,C=US CN=Google Internet Authority,O=Google Inc,C=US 1334102677.000000 1365639277.000000 - diff --git a/testing/btest/Traces/icmp/icmp6-router-advert.pcap b/testing/btest/Traces/icmp/icmp6-router-advert.pcap index 9c3e557a9d..38de434c2f 100644 Binary files a/testing/btest/Traces/icmp/icmp6-router-advert.pcap and b/testing/btest/Traces/icmp/icmp6-router-advert.pcap differ diff --git a/testing/btest/scripts/base/frameworks/logging/rotation.trace b/testing/btest/Traces/rotation.trace similarity index 100% rename from testing/btest/scripts/base/frameworks/logging/rotation.trace rename to testing/btest/Traces/rotation.trace diff --git a/testing/btest/Traces/tls-conn-with-extensions.trace b/testing/btest/Traces/tls-conn-with-extensions.trace new file mode 100644 index 0000000000..a3b724b3a1 Binary files /dev/null and b/testing/btest/Traces/tls-conn-with-extensions.trace differ diff --git a/testing/btest/bifs/md5.test b/testing/btest/bifs/md5.test new file mode 100644 index 0000000000..2632d76cb4 --- /dev/null +++ b/testing/btest/bifs/md5.test @@ -0,0 +1,16 @@ +# @TEST-EXEC: bro -b %INPUT >output +# @TEST-EXEC: btest-diff output + +print md5_hash("one"); +print md5_hash("one", "two", "three"); + +md5_hash_init("a"); +md5_hash_init("b"); + +md5_hash_update("a", "one"); +md5_hash_update("b", "one"); +md5_hash_update("b", "two"); +md5_hash_update("b", "three"); + +print md5_hash_finish("a"); +print md5_hash_finish("b"); diff --git a/testing/btest/bifs/sha1.test b/testing/btest/bifs/sha1.test new file mode 100644 index 0000000000..85c8df99c5 --- /dev/null +++ b/testing/btest/bifs/sha1.test @@ -0,0 +1,16 @@ +# @TEST-EXEC: bro -b %INPUT >output +# @TEST-EXEC: btest-diff output + +print sha1_hash("one"); +print sha1_hash("one", "two", "three"); + +sha1_hash_init("a"); +sha1_hash_init("b"); + +sha1_hash_update("a", "one"); +sha1_hash_update("b", "one"); +sha1_hash_update("b", "two"); +sha1_hash_update("b", "three"); + +print sha1_hash_finish("a"); +print sha1_hash_finish("b"); diff --git a/testing/btest/bifs/sha256.test b/testing/btest/bifs/sha256.test new file mode 100644 index 0000000000..7451f2fad3 --- /dev/null +++ b/testing/btest/bifs/sha256.test @@ -0,0 +1,16 @@ +# @TEST-EXEC: bro -b %INPUT >output +# @TEST-EXEC: 
btest-diff output + +print sha256_hash("one"); +print sha256_hash("one", "two", "three"); + +sha256_hash_init("a"); +sha256_hash_init("b"); + +sha256_hash_update("a", "one"); +sha256_hash_update("b", "one"); +sha256_hash_update("b", "two"); +sha256_hash_update("b", "three"); + +print sha256_hash_finish("a"); +print sha256_hash_finish("b"); diff --git a/testing/btest/core/checksums.test b/testing/btest/core/checksums.test index f5b3230686..77fe2a62d3 100644 --- a/testing/btest/core/checksums.test +++ b/testing/btest/core/checksums.test @@ -1,23 +1,42 @@ -# @TEST-EXEC: bro -b -r $TRACES/chksums/ip4-bad-chksum.pcap >>bad.out 2>&1 -# @TEST-EXEC: bro -b -r $TRACES/chksums/ip4-tcp-bad-chksum.pcap >>bad.out 2>&1 -# @TEST-EXEC: bro -b -r $TRACES/chksums/ip4-udp-bad-chksum.pcap >>bad.out 2>&1 -# @TEST-EXEC: bro -b -r $TRACES/chksums/ip4-icmp-bad-chksum.pcap >>bad.out 2>&1 -# @TEST-EXEC: bro -b -r $TRACES/chksums/ip6-route0-tcp-bad-chksum.pcap >>bad.out 2>&1 -# @TEST-EXEC: bro -b -r $TRACES/chksums/ip6-route0-udp-bad-chksum.pcap >>bad.out 2>&1 -# @TEST-EXEC: bro -b -r $TRACES/chksums/ip6-route0-icmp6-bad-chksum.pcap >>bad.out 2>&1 -# @TEST-EXEC: bro -b -r $TRACES/chksums/ip6-tcp-bad-chksum.pcap >>bad.out 2>&1 -# @TEST-EXEC: bro -b -r $TRACES/chksums/ip6-udp-bad-chksum.pcap >>bad.out 2>&1 -# @TEST-EXEC: bro -b -r $TRACES/chksums/ip6-icmp6-bad-chksum.pcap >>bad.out 2>&1 +# @TEST-EXEC: bro -r $TRACES/chksums/ip4-bad-chksum.pcap +# @TEST-EXEC: mv weird.log bad.out +# @TEST-EXEC: bro -r $TRACES/chksums/ip4-tcp-bad-chksum.pcap +# @TEST-EXEC: cat weird.log >> bad.out +# @TEST-EXEC: bro -r $TRACES/chksums/ip4-udp-bad-chksum.pcap +# @TEST-EXEC: cat weird.log >> bad.out +# @TEST-EXEC: bro -r $TRACES/chksums/ip4-icmp-bad-chksum.pcap +# @TEST-EXEC: cat weird.log >> bad.out +# @TEST-EXEC: bro -r $TRACES/chksums/ip6-route0-tcp-bad-chksum.pcap +# @TEST-EXEC: cat weird.log >> bad.out +# @TEST-EXEC: bro -r $TRACES/chksums/ip6-route0-udp-bad-chksum.pcap +# @TEST-EXEC: cat weird.log >> bad.out +# @TEST-EXEC: bro -r $TRACES/chksums/ip6-route0-icmp6-bad-chksum.pcap +# @TEST-EXEC: cat weird.log >> bad.out +# @TEST-EXEC: bro -r $TRACES/chksums/ip6-tcp-bad-chksum.pcap +# @TEST-EXEC: cat weird.log >> bad.out +# @TEST-EXEC: bro -r $TRACES/chksums/ip6-udp-bad-chksum.pcap +# @TEST-EXEC: cat weird.log >> bad.out +# @TEST-EXEC: bro -r $TRACES/chksums/ip6-icmp6-bad-chksum.pcap +# @TEST-EXEC: cat weird.log >> bad.out +# @TEST-EXEC: bro -r $TRACES/chksums/ip4-tcp-good-chksum.pcap +# @TEST-EXEC: mv weird.log good.out +# @TEST-EXEC: bro -r $TRACES/chksums/ip4-udp-good-chksum.pcap +# @TEST-EXEC: test ! -e weird.log +# @TEST-EXEC: bro -r $TRACES/chksums/ip4-icmp-good-chksum.pcap +# @TEST-EXEC: test ! 
-e weird.log +# @TEST-EXEC: bro -r $TRACES/chksums/ip6-route0-tcp-good-chksum.pcap +# @TEST-EXEC: cat weird.log >> good.out +# @TEST-EXEC: bro -r $TRACES/chksums/ip6-route0-udp-good-chksum.pcap +# @TEST-EXEC: cat weird.log >> good.out +# @TEST-EXEC: bro -r $TRACES/chksums/ip6-route0-icmp6-good-chksum.pcap +# @TEST-EXEC: cat weird.log >> good.out +# @TEST-EXEC: bro -r $TRACES/chksums/ip6-tcp-good-chksum.pcap +# @TEST-EXEC: cat weird.log >> good.out +# @TEST-EXEC: bro -r $TRACES/chksums/ip6-udp-good-chksum.pcap +# @TEST-EXEC: cat weird.log >> good.out +# @TEST-EXEC: bro -r $TRACES/chksums/ip6-icmp6-good-chksum.pcap +# @TEST-EXEC: cat weird.log >> good.out -# @TEST-EXEC: bro -b -r $TRACES/chksums/ip4-tcp-good-chksum.pcap >>good.out 2>&1 -# @TEST-EXEC: bro -b -r $TRACES/chksums/ip4-udp-good-chksum.pcap >>good.out 2>&1 -# @TEST-EXEC: bro -b -r $TRACES/chksums/ip4-icmp-good-chksum.pcap >>good.out 2>&1 -# @TEST-EXEC: bro -b -r $TRACES/chksums/ip6-route0-tcp-good-chksum.pcap >>good.out 2>&1 -# @TEST-EXEC: bro -b -r $TRACES/chksums/ip6-route0-udp-good-chksum.pcap >>good.out 2>&1 -# @TEST-EXEC: bro -b -r $TRACES/chksums/ip6-route0-icmp6-good-chksum.pcap >>good.out 2>&1 -# @TEST-EXEC: bro -b -r $TRACES/chksums/ip6-tcp-good-chksum.pcap >>good.out 2>&1 -# @TEST-EXEC: bro -b -r $TRACES/chksums/ip6-udp-good-chksum.pcap >>good.out 2>&1 -# @TEST-EXEC: bro -b -r $TRACES/chksums/ip6-icmp6-good-chksum.pcap >>good.out 2>&1 # @TEST-EXEC: btest-diff bad.out # @TEST-EXEC: btest-diff good.out diff --git a/testing/btest/core/disable-mobile-ipv6.test b/testing/btest/core/disable-mobile-ipv6.test index 84dc43dae8..5151a12b38 100644 --- a/testing/btest/core/disable-mobile-ipv6.test +++ b/testing/btest/core/disable-mobile-ipv6.test @@ -1,6 +1,6 @@ # @TEST-REQUIRES: grep -q "#undef ENABLE_MOBILE_IPV6" $BUILD/config.h -# @TEST-EXEC: bro -b -r $TRACES/mobile-ipv6/mip6_back.trace %INPUT >output 2>&1 -# @TEST-EXEC: btest-diff output +# @TEST-EXEC: bro -r $TRACES/mobile-ipv6/mip6_back.trace %INPUT +# @TEST-EXEC: btest-diff weird.log event mobile_ipv6_message(p: pkt_hdr) { diff --git a/testing/btest/core/file-caching-serialization.test b/testing/btest/core/file-caching-serialization.test new file mode 100644 index 0000000000..7ff1d8be8d --- /dev/null +++ b/testing/btest/core/file-caching-serialization.test @@ -0,0 +1,49 @@ +# This checks that the interactions between open-file caching and +# serialization works ok. In the first case, all files can fit +# in the cache, but get serialized before every write. In the +# second case, files are eventually forced out of the cache and +# undergo serialization, which requires re-opening. + +# @TEST-EXEC: bro -b %INPUT "test_file_prefix=one" +# @TEST-EXEC: btest-diff one0 +# @TEST-EXEC: btest-diff one1 +# @TEST-EXEC: btest-diff one2 +# @TEST-EXEC: bro -b %INPUT "test_file_prefix=two" "max_files_in_cache=2" +# @TEST-EXEC: btest-diff two0 +# @TEST-EXEC: btest-diff two1 +# @TEST-EXEC: btest-diff two2 + +const test_file_prefix = "" &redef; +global file_table: table[string] of file; +global iterations: vector of count = vector(0,1,2,3,4,5,6,7,8); + +function write_to_file(c: count) + { + local f: file; + # Take turns writing across three output files. 
+ local filename = fmt("%s%s", test_file_prefix, c % 3 ); + + if ( filename in file_table ) + f = file_table[filename]; + else + { + f = open(filename); + file_table[filename] = f; + } + + # This when block is a trick to get the frame cloned + # and thus serialize the local file value + when ( local s = fmt("write %d", c) ) + print f, s; + } + +event file_opened(f: file) + { + print f, "opened"; + } + +event bro_init() + { + for ( i in iterations ) + write_to_file(iterations[i]); + } diff --git a/testing/btest/core/icmp/icmp6-events.test b/testing/btest/core/icmp/icmp6-events.test index 64c14920ff..052ba91ee6 100644 --- a/testing/btest/core/icmp/icmp6-events.test +++ b/testing/btest/core/icmp/icmp6-events.test @@ -88,9 +88,12 @@ event icmp_neighbor_solicitation(c: connection, icmp: icmp_conn, tgt: addr) print " icmp_conn: " + fmt("%s", icmp); } -event icmp_neighbor_advertisement(c: connection, icmp: icmp_conn, tgt:addr) +event icmp_neighbor_advertisement(c: connection, icmp: icmp_conn, router: bool, solicited: bool, override: bool, tgt: addr) { print "icmp_neighbor_advertisement (tgt=" + fmt("%s", tgt) + ")"; + print " router=" + fmt("%s", router); + print " solicited=" + fmt("%s", solicited); + print " override=" + fmt("%s", override); print " conn_id: " + fmt("%s", c$id); print " icmp_conn: " + fmt("%s", icmp); } @@ -102,9 +105,19 @@ event icmp_router_solicitation(c: connection, icmp: icmp_conn) print " icmp_conn: " + fmt("%s", icmp); } -event icmp_router_advertisement(c: connection, icmp: icmp_conn, hop_limit: count, managed: bool, router_lifetime: count, reachable_time: interval, retrans_timer: interval) +event icmp_router_advertisement(c: connection, icmp: icmp_conn, cur_hop_limit: count, managed: bool, other: bool, home_agent: bool, pref: count, proxy: bool, rsv: count, router_lifetime: interval, reachable_time: interval, retrans_timer: interval) { - print "icmp_router_advertisement (hop_limit=" + fmt("%d", hop_limit) + ", managed=" + fmt("%s", managed) + ", rlifetime=" + fmt("%d", router_lifetime) + ", reachable=" + fmt("%f", reachable_time) + ", retrans=" + fmt("%f", retrans_timer) + ")"; + print "icmp_router_advertisement"; + print " cur_hop_limit=" + fmt("%s", cur_hop_limit); + print " managed=" + fmt("%s", managed); + print " other=" + fmt("%s", other); + print " home_agent=" + fmt("%s", home_agent); + print " pref=" + fmt("%s", pref); + print " proxy=" + fmt("%s", proxy); + print " rsv=" + fmt("%s", rsv); + print " router_lifetime=" + fmt("%s", router_lifetime); + print " reachable_time=" + fmt("%s", reachable_time); + print " retrans_timer=" + fmt("%s", retrans_timer); print " conn_id: " + fmt("%s", c$id); print " icmp_conn: " + fmt("%s", icmp); } diff --git a/testing/btest/core/leaks/dataseries-rotate.bro b/testing/btest/core/leaks/dataseries-rotate.bro new file mode 100644 index 0000000000..6a3b5550cc --- /dev/null +++ b/testing/btest/core/leaks/dataseries-rotate.bro @@ -0,0 +1,35 @@ +# +# @TEST-REQUIRES: has-writer DataSeries && which ds2txt +# @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks +# +# @TEST-GROUP: leaks +# @TEST-GROUP: dataseries +# +# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local bro -m -b -r $TRACES/rotation.trace %INPUT Log::default_writer=Log::WRITER_DATASERIES + +module Test; + +export { + # Create a new ID for our log stream + redef enum Log::ID += { LOG }; + + # Define a record with all the columns the log file can have. + # (I'm using a subset of fields from ssh-ext for demonstration.) 
+ type Log: record { + t: time; + id: conn_id; # Will be rolled out into individual columns. + } &log; +} + +redef Log::default_rotation_interval = 1hr; +redef Log::default_rotation_postprocessor_cmd = "echo"; + +event bro_init() +{ + Log::create_stream(Test::LOG, [$columns=Log]); +} + +event new_connection(c: connection) + { + Log::write(Test::LOG, [$t=network_time(), $id=c$id]); + } diff --git a/testing/btest/core/leaks/dataseries.bro b/testing/btest/core/leaks/dataseries.bro new file mode 100644 index 0000000000..b72b880612 --- /dev/null +++ b/testing/btest/core/leaks/dataseries.bro @@ -0,0 +1,10 @@ +# Needs perftools support. +# +# @TEST-REQUIRES: has-writer DataSeries && which ds2txt +# @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks +# +# @TEST-GROUP: leaks +# @TEST-GROUP: dataseries +# +# @TEST-REQUIRES: bro --help 2>&1 | grep -q mem-leaks +# @TEST-EXEC: HEAP_CHECK_DUMP_DIRECTORY=. HEAPCHECK=local bro -m -r $TRACES/wikipedia.trace Log::default_writer=Log::WRITER_DATASERIES diff --git a/testing/btest/core/mobility-checksums.test b/testing/btest/core/mobility-checksums.test index 1d41daf543..8a88eb8194 100644 --- a/testing/btest/core/mobility-checksums.test +++ b/testing/btest/core/mobility-checksums.test @@ -1,9 +1,15 @@ # @TEST-REQUIRES: grep -q "#define ENABLE_MOBILE_IPV6" $BUILD/config.h -# @TEST-EXEC: bro -b -r $TRACES/chksums/mip6-bad-mh-chksum.pcap >>bad.out 2>&1 -# @TEST-EXEC: bro -b -r $TRACES/chksums/ip6-hoa-tcp-bad-chksum.pcap >>bad.out 2>&1 -# @TEST-EXEC: bro -b -r $TRACES/chksums/ip6-hoa-udp-bad-chksum.pcap >>bad.out 2>&1 -# @TEST-EXEC: bro -b -r $TRACES/chksums/mip6-good-mh-chksum.pcap >>good.out 2>&1 -# @TEST-EXEC: bro -b -r $TRACES/chksums/ip6-hoa-tcp-good-chksum.pcap >>bad.out 2>&1 -# @TEST-EXEC: bro -b -r $TRACES/chksums/ip6-hoa-udp-good-chksum.pcap >>bad.out 2>&1 +# @TEST-EXEC: bro -r $TRACES/chksums/mip6-bad-mh-chksum.pcap +# @TEST-EXEC: mv weird.log bad.out +# @TEST-EXEC: bro -r $TRACES/chksums/ip6-hoa-tcp-bad-chksum.pcap +# @TEST-EXEC: cat weird.log >> bad.out +# @TEST-EXEC: bro -r $TRACES/chksums/ip6-hoa-udp-bad-chksum.pcap +# @TEST-EXEC: cat weird.log >> bad.out +# @TEST-EXEC: rm weird.log +# @TEST-EXEC: bro -r $TRACES/chksums/mip6-good-mh-chksum.pcap +# @TEST-EXEC: test ! -e weird.log +# @TEST-EXEC: bro -r $TRACES/chksums/ip6-hoa-tcp-good-chksum.pcap +# @TEST-EXEC: test ! -e weird.log +# @TEST-EXEC: bro -r $TRACES/chksums/ip6-hoa-udp-good-chksum.pcap +# @TEST-EXEC: test ! 
-e weird.log # @TEST-EXEC: btest-diff bad.out -# @TEST-EXEC: btest-diff good.out diff --git a/testing/btest/core/truncation.test b/testing/btest/core/truncation.test index 16a60fe6db..ee8bdd5bf9 100644 --- a/testing/btest/core/truncation.test +++ b/testing/btest/core/truncation.test @@ -1,6 +1,9 @@ # Truncated IP packet's should not be analyzed, and generate truncated_IP weird -# @TEST-EXEC: bro -b -r $TRACES/trunc/ip4-trunc.pcap >>output 2>&1 -# @TEST-EXEC: bro -b -r $TRACES/trunc/ip6-trunc.pcap >>output 2>&1 -# @TEST-EXEC: bro -b -r $TRACES/trunc/ip6-ext-trunc.pcap >>output 2>&1 +# @TEST-EXEC: bro -r $TRACES/trunc/ip4-trunc.pcap +# @TEST-EXEC: mv weird.log output +# @TEST-EXEC: bro -r $TRACES/trunc/ip6-trunc.pcap +# @TEST-EXEC: cat weird.log >> output +# @TEST-EXEC: bro -r $TRACES/trunc/ip6-ext-trunc.pcap +# @TEST-EXEC: cat weird.log >> output # @TEST-EXEC: btest-diff output diff --git a/testing/btest/coverage/doc.test b/testing/btest/coverage/doc.test index 18ed13e6fa..d99122575d 100644 --- a/testing/btest/coverage/doc.test +++ b/testing/btest/coverage/doc.test @@ -1,7 +1,10 @@ # This tests that we're generating bro script documentation for all the # available bro scripts. If this fails, then the genDocSources.sh needs # to be run to produce a new DocSourcesList.cmake or genDocSources.sh needs -# to be updated to blacklist undesired scripts. +# to be updated to blacklist undesired scripts. To update, run the +# top-level Makefile: +# +# make update-doc-sources # # @TEST-EXEC: $DIST/doc/scripts/genDocSourcesList.sh # @TEST-EXEC: cmp $DIST/doc/scripts/DocSourcesList.cmake ./DocSourcesList.cmake diff --git a/testing/btest/istate/broccoli-ipv6.bro b/testing/btest/istate/broccoli-ipv6.bro index b7ab5bdb05..cd0b546ce7 100644 --- a/testing/btest/istate/broccoli-ipv6.bro +++ b/testing/btest/istate/broccoli-ipv6.bro @@ -1,4 +1,4 @@ -# @TEST-GROUP: comm +# @TEST-SERIALIZE: comm # # @TEST-REQUIRES: test -e $BUILD/aux/broccoli/src/libbroccoli.so || test -e $BUILD/aux/broccoli/src/libbroccoli.dylib # diff --git a/testing/btest/istate/broccoli.bro b/testing/btest/istate/broccoli.bro index 235ff9119c..2bae5dc080 100644 --- a/testing/btest/istate/broccoli.bro +++ b/testing/btest/istate/broccoli.bro @@ -1,4 +1,4 @@ -# @TEST-GROUP: comm +# @TEST-SERIALIZE: comm # # @TEST-REQUIRES: test -e $BUILD/aux/broccoli/src/libbroccoli.so || test -e $BUILD/aux/broccoli/src/libbroccoli.dylib # diff --git a/testing/btest/istate/events-ssl.bro b/testing/btest/istate/events-ssl.bro index 25aa2dc8fb..03784addef 100644 --- a/testing/btest/istate/events-ssl.bro +++ b/testing/btest/istate/events-ssl.bro @@ -1,4 +1,4 @@ -# @TEST-GROUP: comm +# @TEST-SERIALIZE: comm # # @TEST-EXEC: btest-bg-run sender bro -C -r $TRACES/web.trace --pseudo-realtime ../sender.bro # @TEST-EXEC: btest-bg-run receiver bro ../receiver.bro diff --git a/testing/btest/istate/events.bro b/testing/btest/istate/events.bro index 81a1d765db..81d9cc61b6 100644 --- a/testing/btest/istate/events.bro +++ b/testing/btest/istate/events.bro @@ -1,4 +1,4 @@ -# @TEST-GROUP: comm +# @TEST-SERIALIZE: comm # # @TEST-EXEC: btest-bg-run sender bro -C -r $TRACES/web.trace --pseudo-realtime ../sender.bro # @TEST-EXEC: btest-bg-run receiver bro ../receiver.bro diff --git a/testing/btest/istate/pybroccoli.py b/testing/btest/istate/pybroccoli.py index 1a5830b41a..9f26efca31 100644 --- a/testing/btest/istate/pybroccoli.py +++ b/testing/btest/istate/pybroccoli.py @@ -1,4 +1,4 @@ -# @TEST-GROUP: comm +# @TEST-SERIALIZE: comm # # @TEST-REQUIRES: test -e 
$BUILD/aux/broccoli/src/libbroccoli.so || test -e $BUILD/aux/broccoli/src/libbroccoli.dylib # @TEST-REQUIRES: test -e $BUILD/aux/broccoli/bindings/broccoli-python/_broccoli_intern.so diff --git a/testing/btest/istate/sync.bro b/testing/btest/istate/sync.bro index db5ea0bbb4..776ddfd2fa 100644 --- a/testing/btest/istate/sync.bro +++ b/testing/btest/istate/sync.bro @@ -1,4 +1,4 @@ -# @TEST-GROUP: comm +# @TEST-SERIALIZE: comm # # @TEST-EXEC: btest-bg-run sender bro %INPUT ../sender.bro # @TEST-EXEC: btest-bg-run receiver bro %INPUT ../receiver.bro diff --git a/testing/btest/scripts/base/frameworks/cluster/start-it-up.bro b/testing/btest/scripts/base/frameworks/cluster/start-it-up.bro index d7b552d962..a1069d1bd0 100644 --- a/testing/btest/scripts/base/frameworks/cluster/start-it-up.bro +++ b/testing/btest/scripts/base/frameworks/cluster/start-it-up.bro @@ -1,4 +1,4 @@ -# @TEST-GROUP: comm +# @TEST-SERIALIZE: comm # # @TEST-EXEC: btest-bg-run manager-1 BROPATH=$BROPATH:.. CLUSTER_NODE=manager-1 bro %INPUT # @TEST-EXEC: btest-bg-run proxy-1 BROPATH=$BROPATH:.. CLUSTER_NODE=proxy-1 bro %INPUT diff --git a/testing/btest/scripts/base/frameworks/communication/communication_log_baseline.bro b/testing/btest/scripts/base/frameworks/communication/communication_log_baseline.bro index 85b23dbdc0..3d80ef7777 100644 --- a/testing/btest/scripts/base/frameworks/communication/communication_log_baseline.bro +++ b/testing/btest/scripts/base/frameworks/communication/communication_log_baseline.bro @@ -1,4 +1,4 @@ -# @TEST-GROUP: comm +# @TEST-SERIALIZE: comm # # @TEST-EXEC: btest-bg-run receiver bro -b ../receiver.bro # @TEST-EXEC: btest-bg-run sender bro -b ../sender.bro diff --git a/testing/btest/scripts/base/frameworks/control/configuration_update.bro b/testing/btest/scripts/base/frameworks/control/configuration_update.bro index d09105ca7a..920a162503 100644 --- a/testing/btest/scripts/base/frameworks/control/configuration_update.bro +++ b/testing/btest/scripts/base/frameworks/control/configuration_update.bro @@ -1,4 +1,4 @@ -# @TEST-GROUP: comm +# @TEST-SERIALIZE: comm # # @TEST-EXEC: btest-bg-run controllee BROPATH=$BROPATH:.. bro %INPUT frameworks/control/controllee Communication::listen_port=65531/tcp # @TEST-EXEC: btest-bg-run controller BROPATH=$BROPATH:.. bro %INPUT test-redef frameworks/control/controller Control::host=127.0.0.1 Control::host_port=65531/tcp Control::cmd=configuration_update diff --git a/testing/btest/scripts/base/frameworks/control/id_value.bro b/testing/btest/scripts/base/frameworks/control/id_value.bro index 7235521034..c5d1d063f5 100644 --- a/testing/btest/scripts/base/frameworks/control/id_value.bro +++ b/testing/btest/scripts/base/frameworks/control/id_value.bro @@ -1,4 +1,4 @@ -# @TEST-GROUP: comm +# @TEST-SERIALIZE: comm # # @TEST-EXEC: btest-bg-run controllee BROPATH=$BROPATH:.. bro %INPUT only-for-controllee frameworks/control/controllee Communication::listen_port=65532/tcp # @TEST-EXEC: btest-bg-run controller BROPATH=$BROPATH:.. 
diff --git a/testing/btest/scripts/base/frameworks/control/shutdown.bro b/testing/btest/scripts/base/frameworks/control/shutdown.bro
index ec1ca6da16..7b6e5713f8 100644
--- a/testing/btest/scripts/base/frameworks/control/shutdown.bro
+++ b/testing/btest/scripts/base/frameworks/control/shutdown.bro
@@ -1,4 +1,4 @@
-# @TEST-GROUP: comm
+# @TEST-SERIALIZE: comm
 #
 # @TEST-EXEC: btest-bg-run controllee BROPATH=$BROPATH:.. bro %INPUT frameworks/control/controllee Communication::listen_port=65530/tcp
 # @TEST-EXEC: btest-bg-run controller BROPATH=$BROPATH:.. bro %INPUT frameworks/control/controller Control::host=127.0.0.1 Control::host_port=65530/tcp Control::cmd=shutdown
diff --git a/testing/btest/scripts/base/frameworks/logging/dataseries/options.bro b/testing/btest/scripts/base/frameworks/logging/dataseries/options.bro
new file mode 100644
index 0000000000..fc3752a168
--- /dev/null
+++ b/testing/btest/scripts/base/frameworks/logging/dataseries/options.bro
@@ -0,0 +1,44 @@
+#
+# @TEST-REQUIRES: has-writer DataSeries && which ds2txt
+# @TEST-GROUP: dataseries
+#
+# @TEST-EXEC: bro -b %INPUT Log::default_writer=Log::WRITER_DATASERIES
+# @TEST-EXEC: test -e ssh.ds.xml
+# @TEST-EXEC: btest-diff ssh.ds.xml
+
+module SSH;
+
+redef LogDataSeries::dump_schema = T;
+
+# Haven't yet found a way to check for the effect of these.
+redef LogDataSeries::compression = "bz2";
+redef LogDataSeries::extent_size = 1000;
+redef LogDataSeries::num_threads = 5;
+
+# LogDataSeries::use_integer_for_time is tested separately.
+
+export {
+	redef enum Log::ID += { LOG };
+
+	type Log: record {
+		t: time;
+		id: conn_id; # Will be rolled out into individual columns.
+		status: string &optional;
+		country: string &default="unknown";
+	} &log;
+}
+
+event bro_init()
+{
+	Log::create_stream(SSH::LOG, [$columns=Log]);
+
+	local cid = [$orig_h=1.2.3.4, $orig_p=1234/tcp, $resp_h=2.3.4.5, $resp_p=80/tcp];
+
+	Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="success"]);
+	Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="US"]);
+	Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="UK"]);
+	Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="success", $country="BR"]);
+	Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="MX"]);
+
+}
+
diff --git a/testing/btest/scripts/base/frameworks/logging/dataseries/rotate.bro b/testing/btest/scripts/base/frameworks/logging/dataseries/rotate.bro
new file mode 100644
index 0000000000..7b708473e3
--- /dev/null
+++ b/testing/btest/scripts/base/frameworks/logging/dataseries/rotate.bro
@@ -0,0 +1,34 @@
+#
+# @TEST-REQUIRES: has-writer DataSeries && which ds2txt
+# @TEST-GROUP: dataseries
+#
+# @TEST-EXEC: bro -b -r ${TRACES}/rotation.trace %INPUT 2>&1 Log::default_writer=Log::WRITER_DATASERIES | grep "test" >out
+# @TEST-EXEC: for i in test.*.ds; do printf '> %s\n' $i; ds2txt --skip-index $i; done >>out
+# @TEST-EXEC: btest-diff out
+
+module Test;
+
+export {
+	# Create a new ID for our log stream
+	redef enum Log::ID += { LOG };
+
+	# Define a record with all the columns the log file can have.
+	# (I'm using a subset of fields from ssh-ext for demonstration.)
+	type Log: record {
+		t: time;
+		id: conn_id; # Will be rolled out into individual columns.
+	} &log;
+}
+
+redef Log::default_rotation_interval = 1hr;
+redef Log::default_rotation_postprocessor_cmd = "echo";
+
+event bro_init()
+{
+	Log::create_stream(Test::LOG, [$columns=Log]);
+}
+
+event new_connection(c: connection)
+	{
+	Log::write(Test::LOG, [$t=network_time(), $id=c$id]);
+	}
diff --git a/testing/btest/scripts/base/frameworks/logging/dataseries/test-logging.bro b/testing/btest/scripts/base/frameworks/logging/dataseries/test-logging.bro
new file mode 100644
index 0000000000..ee0426ae55
--- /dev/null
+++ b/testing/btest/scripts/base/frameworks/logging/dataseries/test-logging.bro
@@ -0,0 +1,35 @@
+#
+# @TEST-REQUIRES: has-writer DataSeries && which ds2txt
+# @TEST-GROUP: dataseries
+#
+# @TEST-EXEC: bro -b %INPUT Log::default_writer=Log::WRITER_DATASERIES
+# @TEST-EXEC: ds2txt --skip-index ssh.ds >ssh.ds.txt
+# @TEST-EXEC: btest-diff ssh.ds.txt
+
+module SSH;
+
+export {
+	redef enum Log::ID += { LOG };
+
+	type Log: record {
+		t: time;
+		id: conn_id; # Will be rolled out into individual columns.
+		status: string &optional;
+		country: string &default="unknown";
+	} &log;
+}
+
+event bro_init()
+{
+	Log::create_stream(SSH::LOG, [$columns=Log]);
+
+	local cid = [$orig_h=1.2.3.4, $orig_p=1234/tcp, $resp_h=2.3.4.5, $resp_p=80/tcp];
+
+	Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="success"]);
+	Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="US"]);
+	Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="UK"]);
+	Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="success", $country="BR"]);
+	Log::write(SSH::LOG, [$t=network_time(), $id=cid, $status="failure", $country="MX"]);
+
+}
+
diff --git a/testing/btest/scripts/base/frameworks/logging/dataseries/time-as-int.bro b/testing/btest/scripts/base/frameworks/logging/dataseries/time-as-int.bro
new file mode 100644
index 0000000000..5e3f864b33
--- /dev/null
+++ b/testing/btest/scripts/base/frameworks/logging/dataseries/time-as-int.bro
@@ -0,0 +1,9 @@
+#
+# @TEST-REQUIRES: has-writer DataSeries && which ds2txt
+# @TEST-GROUP: dataseries
+#
+# @TEST-EXEC: bro -r $TRACES/wikipedia.trace %INPUT Log::default_writer=Log::WRITER_DATASERIES
+# @TEST-EXEC: ds2txt --skip-index conn.ds >conn.ds.txt
+# @TEST-EXEC: btest-diff conn.ds.txt
+
+redef LogDataSeries::use_integer_for_time = T;
diff --git a/testing/btest/scripts/base/frameworks/logging/dataseries/wikipedia.bro b/testing/btest/scripts/base/frameworks/logging/dataseries/wikipedia.bro
new file mode 100644
index 0000000000..ee1342c470
--- /dev/null
+++ b/testing/btest/scripts/base/frameworks/logging/dataseries/wikipedia.bro
@@ -0,0 +1,9 @@
+#
+# @TEST-REQUIRES: has-writer DataSeries && which ds2txt
+# @TEST-GROUP: dataseries
+#
+# @TEST-EXEC: bro -r $TRACES/wikipedia.trace Log::default_writer=Log::WRITER_DATASERIES
+# @TEST-EXEC: ds2txt --skip-index conn.ds >conn.ds.txt
+# @TEST-EXEC: ds2txt --skip-index http.ds >http.ds.txt
+# @TEST-EXEC: btest-diff conn.ds.txt
+# @TEST-EXEC: btest-diff http.ds.txt
diff --git a/testing/btest/scripts/base/frameworks/logging/remote-types.bro b/testing/btest/scripts/base/frameworks/logging/remote-types.bro
index ce93495bc8..f1ef4f0c31 100644
--- a/testing/btest/scripts/base/frameworks/logging/remote-types.bro
+++ b/testing/btest/scripts/base/frameworks/logging/remote-types.bro
@@ -1,4 +1,4 @@
-# @TEST-GROUP: comm
+# @TEST-SERIALIZE: comm
 #
 # @TEST-EXEC: btest-bg-run sender bro --pseudo-realtime %INPUT ../sender.bro
 # @TEST-EXEC: btest-bg-run receiver bro --pseudo-realtime %INPUT ../receiver.bro
diff --git a/testing/btest/scripts/base/frameworks/logging/remote.bro b/testing/btest/scripts/base/frameworks/logging/remote.bro
index bb1e5b8ce3..8375d7915a 100644
--- a/testing/btest/scripts/base/frameworks/logging/remote.bro
+++ b/testing/btest/scripts/base/frameworks/logging/remote.bro
@@ -1,4 +1,4 @@
-# @TEST-GROUP: comm
+# @TEST-SERIALIZE: comm
 #
 # @TEST-EXEC: btest-bg-run sender bro --pseudo-realtime %INPUT ../sender.bro
 # @TEST-EXEC: sleep 1
diff --git a/testing/btest/scripts/base/frameworks/logging/rotate-custom.bro b/testing/btest/scripts/base/frameworks/logging/rotate-custom.bro
index 7c06ff9248..3f6d40adaf 100644
--- a/testing/btest/scripts/base/frameworks/logging/rotate-custom.bro
+++ b/testing/btest/scripts/base/frameworks/logging/rotate-custom.bro
@@ -1,5 +1,5 @@
 #
-# @TEST-EXEC: bro -b -r %DIR/rotation.trace %INPUT | egrep "test|test2" | sort >out
+# @TEST-EXEC: bro -b -r ${TRACES}/rotation.trace %INPUT | egrep "test|test2" | sort >out
 # @TEST-EXEC: for i in `ls test*.log | sort`; do printf '> %s\n' $i; cat $i; done | sort | uniq >>out
 # @TEST-EXEC: btest-diff out
 # @TEST-EXEC: btest-diff .stderr
diff --git a/testing/btest/scripts/base/frameworks/logging/rotate.bro b/testing/btest/scripts/base/frameworks/logging/rotate.bro
index 14123c56c6..86f659c193 100644
--- a/testing/btest/scripts/base/frameworks/logging/rotate.bro
+++ b/testing/btest/scripts/base/frameworks/logging/rotate.bro
@@ -1,6 +1,6 @@
 #
-# @TEST-EXEC: bro -b -r %DIR/rotation.trace %INPUT 2>&1 | grep "test" >out
-# @TEST-EXEC: for i in test.*.log; do printf '> %s\n' $i; cat $i; done >>out
+# @TEST-EXEC: bro -b -r ${TRACES}/rotation.trace %INPUT 2>&1 | grep "test" >out
+# @TEST-EXEC: for i in `ls test.*.log | sort`; do printf '> %s\n' $i; cat $i; done >>out
 # @TEST-EXEC: btest-diff out
 
 module Test;
diff --git a/testing/btest/scripts/base/frameworks/metrics/basic-cluster.bro b/testing/btest/scripts/base/frameworks/metrics/basic-cluster.bro
index b801074b33..09479b7a2f 100644
--- a/testing/btest/scripts/base/frameworks/metrics/basic-cluster.bro
+++ b/testing/btest/scripts/base/frameworks/metrics/basic-cluster.bro
@@ -1,4 +1,4 @@
-# @TEST-GROUP: comm
+# @TEST-SERIALIZE: comm
 #
 # @TEST-EXEC: btest-bg-run manager-1 BROPATH=$BROPATH:.. CLUSTER_NODE=manager-1 bro %INPUT
 # @TEST-EXEC: btest-bg-run proxy-1 BROPATH=$BROPATH:.. CLUSTER_NODE=proxy-1 bro %INPUT
diff --git a/testing/btest/scripts/base/frameworks/metrics/cluster-intermediate-update.bro b/testing/btest/scripts/base/frameworks/metrics/cluster-intermediate-update.bro
index 701d2ea378..654e42976a 100644
--- a/testing/btest/scripts/base/frameworks/metrics/cluster-intermediate-update.bro
+++ b/testing/btest/scripts/base/frameworks/metrics/cluster-intermediate-update.bro
@@ -1,4 +1,4 @@
-# @TEST-GROUP: comm
+# @TEST-SERIALIZE: comm
 #
 # @TEST-EXEC: btest-bg-run manager-1 BROPATH=$BROPATH:.. CLUSTER_NODE=manager-1 bro %INPUT
 # @TEST-EXEC: btest-bg-run proxy-1 BROPATH=$BROPATH:.. CLUSTER_NODE=proxy-1 bro %INPUT
diff --git a/testing/btest/scripts/base/frameworks/notice/cluster.bro b/testing/btest/scripts/base/frameworks/notice/cluster.bro
index 97470eaa7f..8d54a27eaf 100644
--- a/testing/btest/scripts/base/frameworks/notice/cluster.bro
+++ b/testing/btest/scripts/base/frameworks/notice/cluster.bro
@@ -1,4 +1,4 @@
-# @TEST-GROUP: comm
+# @TEST-SERIALIZE: comm
 #
 # @TEST-EXEC: btest-bg-run manager-1 BROPATH=$BROPATH:.. CLUSTER_NODE=manager-1 bro %INPUT
 # @TEST-EXEC: btest-bg-run proxy-1 BROPATH=$BROPATH:.. CLUSTER_NODE=proxy-1 bro %INPUT
diff --git a/testing/btest/scripts/base/frameworks/notice/suppression-cluster.bro b/testing/btest/scripts/base/frameworks/notice/suppression-cluster.bro
index d56d940e8e..b812c6451d 100644
--- a/testing/btest/scripts/base/frameworks/notice/suppression-cluster.bro
+++ b/testing/btest/scripts/base/frameworks/notice/suppression-cluster.bro
@@ -1,4 +1,4 @@
-# @TEST-GROUP: comm
+# @TEST-SERIALIZE: comm
 #
 # @TEST-EXEC: btest-bg-run manager-1 BROPATH=$BROPATH:.. CLUSTER_NODE=manager-1 bro %INPUT
 # @TEST-EXEC: btest-bg-run proxy-1 BROPATH=$BROPATH:.. CLUSTER_NODE=proxy-1 bro %INPUT
diff --git a/testing/btest/scripts/base/protocols/conn/contents-default-extract.test b/testing/btest/scripts/base/protocols/conn/contents-default-extract.test
new file mode 100644
index 0000000000..82f46b62c8
--- /dev/null
+++ b/testing/btest/scripts/base/protocols/conn/contents-default-extract.test
@@ -0,0 +1,3 @@
+# @TEST-EXEC: bro -f "tcp port 21" -r $TRACES/ipv6-ftp.trace "Conn::default_extract=T"
+# @TEST-EXEC: btest-diff contents_[2001:470:1f11:81f:c999:d94:aa7c:2e3e]:49185-[2001:470:4867:99::21]:21_orig.dat
+# @TEST-EXEC: btest-diff contents_[2001:470:1f11:81f:c999:d94:aa7c:2e3e]:49185-[2001:470:4867:99::21]:21_resp.dat
diff --git a/testing/btest/scripts/base/protocols/ssl/basic.test b/testing/btest/scripts/base/protocols/ssl/basic.test
new file mode 100644
index 0000000000..94b0e87ec1
--- /dev/null
+++ b/testing/btest/scripts/base/protocols/ssl/basic.test
@@ -0,0 +1,4 @@
+# This tests a normal SSL connection and the log it outputs.
+
+# @TEST-EXEC: bro -r $TRACES/tls-conn-with-extensions.trace %INPUT
+# @TEST-EXEC: btest-diff ssl.log
diff --git a/testing/external/Makefile b/testing/external/Makefile
index b705734003..9715b3d669 100644
--- a/testing/external/Makefile
+++ b/testing/external/Makefile
@@ -6,11 +6,11 @@ DIAG=diag.log
 
 all:
 	@rm -f $(DIAG)
-	@for repo in $(REPOS); do (cd $$repo && make ); done
+	@for repo in $(REPOS); do (cd $$repo && make -s ); done
 
 brief:
 	@rm -f $(DIAG)
-	@for repo in $(REPOS); do (cd $$repo && make brief ); done
+	@for repo in $(REPOS); do (cd $$repo && make -s brief ); done
 
 init:
 	git clone $(PUBLIC_REPO)
diff --git a/testing/external/scripts/update-traces b/testing/external/scripts/update-traces
index 8c27fb055e..8dd8d09e9c 100755
--- a/testing/external/scripts/update-traces
+++ b/testing/external/scripts/update-traces
@@ -69,9 +69,9 @@ cat $cfg | while read line; do
         eval "$proxy curl $auth -f --anyauth $url -o $file"
         echo
         mv $fp.tmp $fp
-    else
-        echo "`basename $file` already available."
-    fi
+    #else
+    #    echo "`basename $file` already available."
+    fi
 
     rm -f $fp.tmp
 
diff --git a/testing/external/subdir-btest.cfg b/testing/external/subdir-btest.cfg
index c4e74f99fa..fba89fb724 100644
--- a/testing/external/subdir-btest.cfg
+++ b/testing/external/subdir-btest.cfg
@@ -10,7 +10,7 @@ BROPATH=`bash -c %(testbase)s/../../../build/bro-path-dev`:%(testbase)s/../scrip
 BRO_SEED_FILE=%(testbase)s/../random.seed
 TZ=UTC
 LC_ALL=C
-PATH=%(testbase)s/../../../build/src:%(testbase)s/../../../aux/btest:%(default_path)s
+PATH=%(testbase)s/../../../build/src:%(testbase)s/../../../aux/btest:%(testbase)s/../../scripts:%(default_path)s
 TEST_DIFF_CANONIFIER=%(testbase)s/../../scripts/diff-canonifier-external
 TEST_DIFF_BRIEF=1
 TRACES=%(testbase)s/Traces
diff --git a/testing/scripts/has-writer b/testing/scripts/has-writer
new file mode 100755
index 0000000000..683d31041f
--- /dev/null
+++ b/testing/scripts/has-writer
@@ -0,0 +1,6 @@
+#! /usr/bin/env bash
+#
+# Returns true if Bro has been compiled with support for writer type
+# $1. The type name must match what "bro --help" prints.
+
+bro --help 2>&1 | grep -qi "Supported log formats:.*$1"