diff --git a/.gitmodules b/.gitmodules index 4998cc6b80..24375ce23d 100644 --- a/.gitmodules +++ b/.gitmodules @@ -19,3 +19,6 @@ [submodule "src/3rdparty"] path = src/3rdparty url = git://git.bro.org/bro-3rdparty +[submodule "aux/plugins"] + path = aux/plugins + url = git://git.bro.org/bro-plugins diff --git a/CHANGES b/CHANGES index e4588559c8..b6a6399ae2 100644 --- a/CHANGES +++ b/CHANGES @@ -1,4 +1,256 @@ +2.3-238 | 2014-10-16 06:51:49 -0700 + + * Fix multipart HTTP/MIME entity file analysis so that (1) singular + CR or LF characters in multipart body content are no longer + converted to a full CRLF (thus corrupting the file) and (2) it + also no longer considers the CRLF before the multipart boundary as + part of the content. Addresses BIT-1235. (Jon Siwek) + +2.3-235 | 2014-10-15 10:20:47 -0500 + + * BIT-1273: Add error message for bad enum declaration syntax. + (Jon Siwek) + +2.3-234 | 2014-10-14 14:42:09 -0500 + + * Documentation fixes. (Steve Smoot) + +2.3-233 | 2014-10-09 16:00:27 -0500 + + * Change find-bro-logs unit test to follow symlinks. (Jon Siwek) + + * Add error checks and messages to a test script (Daniel Thayer) + +2.3-230 | 2014-10-08 08:15:17 -0700 + + * Further baseline normalization for plugin test portability. (Robin + Sommer) + +2.3-229 | 2014-10-07 20:18:11 -0700 + + * Fix for test portability. (Robin Sommer) + +2.3-228 | 2014-10-07 15:32:37 -0700 + + * Include plugin unit tests into the top-level btest configuration. (Robin Sommer) + + * Switching the prefix separator for packet source/dumper plugins + once more, now to "::". Addresses BIT-1267. (Robin Sommer) + + * Fix for allowing a packet source/dumper plugin to support multiple + prefixes with a colon. (Robin Sommer) + +2.3-225 | 2014-10-07 15:13:35 -0700 + + * Updating plugin documentation. (Robin Sommer) + +2.3-224 | 2014-10-07 14:32:17 -0700 + + * Improved the log file reference documentation. (Jeannette Dopheide + and Daniel Thayer) + + * Improves shockwave flash file signatures. 
(Seth Hall) + + - This moves the signatures out of the libmagic imported signatures + and into our own general.sig. + + - Expand the detection to LZMA compressed flash files. + + * Add new script language reference documentation on operators, + statements, and directives. Also improved the documentation on + types and attributes by splitting them into two docs, and + providing more examples and adding a chart on the top of each page + with links to each type and attribute for easier access to the + information. (Daniel Thayer) + + * Split the types and attributes reference doc into two docs. + (Daniel Thayer) + +2.3-208 | 2014-10-03 09:38:52 -0500 + + * BIT-1268: Fix uninitialized router_list argument in + dhcp_offer/dhcp_ack. (Jon Siwek) + +2.3-207 | 2014-10-02 16:39:17 -0700 + + * Updating plugin docs. (Robin Sommer) + + * Fix packet sources being treated as idle when a packet is + available. Addresses BIT-1266. (Jon Siwek) + + * Fix regression causing the main loop to spin more frequently. + Addresses BIT-1266. (Jon Siwek) + +2.3-203 | 2014-09-29 20:06:54 -0700 + + * Fix to use length parameter in DNP3 time conversion correctly now. + (Robin Sommer) + +2.3-202 | 2014-09-29 17:05:18 -0700 + + * New SSL extension type from IANA and a few other SSL const + changes. (Johanna Amann) + + * Make unexpected pipe errors fatal as precaution. Addresses + BIT-1260. (Jon Siwek) + + * Adding a function for DNP3 to translate the timestamp format. (Hui + Lin) + +2.3-197 | 2014-09-29 10:42:01 -0500 + + * Fix possible seg fault in TCP reassembler. (Jon Siwek) + +2.3-196 | 2014-09-25 17:53:27 -0700 + + * Changing prefix for packet sources/dumper from ':' to '%'. + Addresses BIT-1249. (Robin Sommer) + + * Remove timeouts from remote communication loop. The select() now + blocks until there's work to do instead of relying on a small + timeout value which can cause unproductive use of cpu cycles. (Jon + Siwek) + + * Improve error message when failing to activate a plugin. 
Also fix + a unit test helper script that checks plugin availability. (Jon + Siwek) + +2.3-183 | 2014-09-24 10:08:04 -0500 + + * Add a "node" field to Intel::Seen struture and intel.log to + indicate which node discovered a hit on an intel item. (Seth Hall) + + * BIT-1261: Fixes to plugin quick start doc. (Jon Siwek) + +2.3-180 | 2014-09-22 12:52:41 -0500 + + * BIT-1259: Fix issue w/ duplicate TCP reassembly deliveries. + (Jon Siwek) + +2.3-178 | 2014-09-18 14:29:46 -0500 + + * BIT-1256: Fix file analysis events from coming after bro_done(). + (Jon Siwek) + +2.3-177 | 2014-09-17 09:41:27 -0500 + + * Documentation fixes. (Chris Mavrakis) + +2.3-174 | 2014-09-17 09:37:09 -0500 + + * Fixed some "make doc" warnings caused by reST formatting + (Daniel Thayer). + +2.3-172 | 2014-09-15 13:38:52 -0500 + + * Remove unneeded allocations for HTTP messages. (Jon Siwek) + +2.3-171 | 2014-09-15 11:14:57 -0500 + + * Fix a compile error on systems without pcap-int.h. (Jon Siwek) + +2.3-170 | 2014-09-12 19:28:01 -0700 + + * Fix incorrect data delivery skips after gap in HTTP Content-Range. + Addresses BIT-1247. (Jon Siwek) + + * Fix file analysis placement of data after gap in HTTP + Content-Range. Addresses BIT-1248. (Jon Siwek) + + * Fix issue w/ TCP reassembler not delivering some segments. + Addresses BIT-1246. (Jon Siwek) + + * Fix MIME entity file data/gap ordering and raise http_entity_data + in line with data arrival. Addresses BIT-1240. (Jon Siwek) + + * Implement file ID caching for MIME_Mail. (Jon Siwek) + + * Fix a compile error. (Jon Siwek) + +2.3-161 | 2014-09-09 12:35:38 -0500 + + * Bugfixes and test updates/additions. (Robin Sommer) + + * Interface tweaks and docs for PktSrc/PktDumper. (Robin Sommer) + + * Moving PCAP-related bifs to iosource/pcap.bif. (Robin Sommer) + + * Moving some of the BPF filtering code into base class. + This will allow packet sources that don't support BPF natively to + emulate the filtering via libpcap. 
(Robin Sommer) + + * Removing FlowSrc. (Robin Sommer) + + * Removing remaining pieces of the 2ndary path, and left-over + files of packet sorter. (Robin Sommer) + + * A bunch of infrastructure work to move IOSource, IOSourceRegistry + (now iosource::Manager) and PktSrc/PktDumper code into iosource/, + and over to a plugin structure. (Robin Sommer) + +2.3-137 | 2014-09-08 19:01:13 -0500 + + * Fix Broxygen's rendering of opaque types. (Jon Siwek) + +2.3-136 | 2014-09-07 20:50:46 -0700 + + * Change more http links to https. (Johanna Amann) + +2.3-134 | 2014-09-04 16:16:36 -0700 + + * Fixed a number of issues with OCSP reply validation. Addresses + BIT-1212. (Johanna Amann) + + * Fix null pointer dereference in OCSP verification code in case no + certificate is sent as part as the ocsp reply. Addresses BIT-1212. + (Johanna Amann) + +2.3-131 | 2014-09-04 16:10:32 -0700 + + * Make links in documentation templates protocol relative. (Johanna + Amann) + +2.3-129 | 2014-09-02 17:21:21 -0700 + + * Simplify a conditional with equivalent branches. (Jon Siwek) + + * Change EDNS parsing code to use rdlength more cautiously. (Jon + Siwek) + + * Fix a memory leak when bind() fails due to EADDRINUSE. (Jon Siwek) + + * Fix possible buffer over-read in DNS TSIG parsing. (Jon Siwek) + +2.3-124 | 2014-08-26 09:24:19 -0500 + + * Better documentation for sub_bytes (Jimmy Jones) + + * BIT-1234: Fix build on systems that already have ntohll/htonll + (Jon Siwek) + +2.3-121 | 2014-08-22 15:22:15 -0700 + + * Detect functions that try to bind variables from an outer scope + and raise an error saying that's not supported. Addresses + BIT-1233. (Jon Siwek) + +2.3-116 | 2014-08-21 16:04:13 -0500 + + * Adding plugin testing to Makefile's test-all. (Robin Sommer) + + * Converting log writers and input readers to plugins. + DataSeries and ElasticSearch plugins have moved to the new + bro-plugins repository, which is now a git submodule in the + aux/plugins directory. 
(Robin Sommer) + +2.3-98 | 2014-08-19 11:03:46 -0500 + + * Silence some doc-related warnings when using `bro -e`. + Closes BIT-1232. (Jon Siwek) + + * Fix possible null ptr derefs reported by Coverity. (Jon Siwek) + 2.3-96 | 2014-08-01 14:35:01 -0700 * Small change to DHCP documentation. In server->client messages the @@ -20,7 +272,7 @@ 2.3-86 | 2014-07-31 14:19:58 -0700 * Fix for redefining built-in constants. (Robin Sommer) - + * Adding missing check that a plugin's API version matches what Bro defines. (Robin Sommer) @@ -38,7 +290,7 @@ main functionality. Changes coming with this: - Replacing the old Plugin macro magic with a new API. - + - The plugin API changed to generally use std::strings instead of const char*. @@ -107,7 +359,7 @@ 2.3-7 | 2014-06-26 17:35:18 -0700 * Extending "make test-all" to include aux/bro-aux. (Robin Sommer) - + 2.3-6 | 2014-06-26 17:24:10 -0700 * DataSeries compilation issue fixed. (mlaterman) @@ -230,7 +482,7 @@ and the load balancing FAQ on the website. (Daniel Thayer) * Update some doc tests and line numbers (Daniel Thayer) - + 2.2-457 | 2014-05-16 14:38:31 -0700 * New script policy/protocols/ssl/validate-ocsp.bro that adds OSCP @@ -245,7 +497,7 @@ Amann) * Improved Heartbleed attack/scan detection. (Bernhard Amann) - + * Let TLS analyzer fail better when no longer in sync with the data stream. (Bernhard Amann) @@ -282,12 +534,12 @@ *Undelivered methods now use a uint64 in place of an int for the relative sequence space offset parameter. - Addresses BIT-348. + Addresses BIT-348. * Fixing compiler warnings. (Robin Sommer) - + * Update SNMP analyzer's DeliverPacket method signature. (Jon Siwek) - + 2.2-417 | 2014-05-07 10:59:22 -0500 * Change handling of atypical OpenSSL error case in x509 verification. (Jon Siwek) @@ -327,7 +579,7 @@ 2.2-397 | 2014-05-01 20:29:20 -0700 * Fix reference counting for lookup_ID() usages. 
(Jon Siwek) - + 2.2-395 | 2014-05-01 20:25:48 -0700 * Fix missing "irc-dcc-data" service field from IRC DCC connections. @@ -340,7 +592,7 @@ Siwek) * Improve file analysis manager shutdown/cleanup. (Jon Siwek) - + 2.2-388 | 2014-04-24 18:38:07 -0700 * Fix decoding of MIME quoted-printable. (Mareq) @@ -353,11 +605,11 @@ 2.2-381 | 2014-04-24 17:08:45 -0700 * Add Java version to software framework. (Brian Little) - + 2.2-379 | 2014-04-24 17:06:21 -0700 * Remove unused Val::attribs member. (Jon Siwek) - + 2.2-377 | 2014-04-24 16:57:54 -0700 * A larger set of SSL improvements and extensions. Addresses @@ -370,7 +622,7 @@ server_name, alpn, and ec-curves. - Adds support for the heartbeat events. - + - Add Heartbleed detector script. - Adds basic support for OCSP stapling. @@ -381,7 +633,7 @@ 2.2-353 | 2014-04-24 16:12:30 -0700 * Adapt HTTP partial content to cache file analysis IDs. (Jon Siwek) - + * Adapt SSL analyzer to generate file analysis handles itself. (Jon Siwek) @@ -397,11 +649,11 @@ Siwek) * Refactor file analysis file ID lookup. (Jon Siwek) - + 2.2-344 | 2014-04-22 20:13:30 -0700 * Refactor various hex escaping code. (Jon Siwek) - + 2.2-341 | 2014-04-17 18:01:41 -0500 * Fix duplicate DNS log entries. (Robin Sommer) @@ -409,9 +661,9 @@ 2.2-341 | 2014-04-17 18:01:01 -0500 * Refactor initialization of ASCII log writer options. (Jon Siwek) - + * Fix a memory leak in ASCII log writer. (Jon Siwek) - + 2.2-338 | 2014-04-17 17:48:17 -0500 * Disable input/logging threads setting their names on every @@ -423,7 +675,7 @@ 2.2-335 | 2014-04-10 15:04:57 -0700 * Small logic fix for main SSL script. (Bernhard Amann) - + * Update DPD signatures for detecting TLS 1.2. (Bernhard Amann) * Remove unused data member of SMTP_Analyzer to silence a Coverity @@ -447,7 +699,7 @@ 2.2-317 | 2014-04-03 10:51:31 -0400 - * Add a uid field to the signatures.log. Addresses BIT-1171 + * Add a uid field to the signatures.log. 
Addresses BIT-1171 (Anthony Verez) 2.2-315 | 2014-04-01 16:50:01 -0700 @@ -458,7 +710,7 @@ 2.2-313 | 2014-04-01 16:40:19 -0700 * Fix a couple nits reported by Coverity.(Jon Siwek) - + * Fix potential memory leak in IP frag reassembly reported by Coverity. (Jon Siwek) @@ -548,14 +800,14 @@ 2.2-271 | 2014-03-30 20:25:17 +0200 * Add unit tests covering vector/set/table ctors/inits. (Jon Siwek) - + * Fix parsing of "local" named table constructors. (Jon Siwek) * Improve type checking of records. Addresses BIT-1159. (Jon Siwek) - + 2.2-267 | 2014-03-30 20:21:43 +0200 - * Improve documentation of Bro clusters. Addresses BIT-1160. + * Improve documentation of Bro clusters. Addresses BIT-1160. (Daniel Thayer) 2.2-263 | 2014-03-30 20:19:05 +0200 @@ -582,7 +834,7 @@ 2.2-254 | 2014-03-30 19:55:22 +0200 * Update instructions on how to build Bro docs. (Daniel Thayer) - + 2.2-251 | 2014-03-28 08:37:37 -0400 * Quick fix to the ElasticSearch writer. (Seth Hall) @@ -612,7 +864,7 @@ formatter work. * Fixing compiler error. (Robin Sommer) - + * Fixing (very unlikely) double delete in HTTP analyzer when decapsulating CONNECTs. (Robin Sommer) @@ -638,7 +890,7 @@ Addresses BIT-1134. (Jon Siwek) * Enable fake DNS mode for test suites. - + * Improve analysis of TCP SYN/SYN-ACK reversal situations. (Jon Siwek) @@ -670,7 +922,7 @@ (Jon Siwek) * Silences some documentation warnings from Sphinx. (Jon Siwek) - + 2.2-215 | 2014-03-10 11:10:15 -0700 * Fix non-deterministic logging of unmatched DNS msgs. Addresses @@ -708,7 +960,7 @@ HTTP traffic. (Seth Hall) * Fixing removal of support analyzers. (Robin Sommer) - + 2.2-199 | 2014-03-03 16:34:20 -0800 * Allow iterating over bif functions with result type vector of any. @@ -723,11 +975,11 @@ 2.2-194 | 2014-02-28 14:50:53 -0800 * Remove packet sorter. Addresses BIT-700. (Bernhard Amann) - + 2.2-192 | 2014-02-28 09:46:43 -0800 * Update Mozilla root bundle. (Bernhard Amann) - + 2.2-190 | 2014-02-27 07:34:44 -0800 * Adjust timings of a few leak tests. 
(Bernhard Amann) @@ -757,7 +1009,7 @@ 2.2-177 | 2014-02-20 17:27:46 -0800 * Update to libmagic version 5.17. Addresses BIT-1136. (Jon Siwek) - + 2.2-174 | 2014-02-14 12:07:04 -0800 * Support for MPLS over VLAN. (Chris Kanich) @@ -786,7 +1038,7 @@ defined, but we see it being actively used. (Bernhard Amann) * Test baseline updates for DNS change. (Robin Sommer) - + 2.2-158 | 2014-02-09 23:45:39 -0500 * Change dns.log to include only standard DNS queries. (Jon Siwek) @@ -862,7 +1114,7 @@ 2.2-128 | 2014-01-30 15:58:47 -0800 * Add leak test for Exec module. (Bernhard Amann) - + * Fix file_over_new_connection event to trigger when entire file is missed. (Jon Siwek) @@ -875,7 +1127,7 @@ "detect_filtered_trace". (Jon Siwek) * Improve TCP FIN retransmission handling. (Jon Siwek) - + 2.2-120 | 2014-01-28 10:25:23 -0800 * Fix and extend x509_extension() event, which now actually returns @@ -893,10 +1145,10 @@ 2.2-115 | 2014-01-22 12:11:18 -0800 * Add unit tests for new Bro Manual docs. (Jon Siwek) - + * New content for the "Using Bro" section of the manual. (Rafael Bonilla/Jon Siwek) - + 2.2-105 | 2014-01-20 12:16:48 -0800 * Support GRE tunnel decapsulation, including enhanced GRE headers. @@ -905,7 +1157,7 @@ Addresses BIT-867. (Jon Siwek) * Simplify FragReassembler memory management. (Jon Siwek) - + 2.2-102 | 2014-01-20 12:00:29 -0800 * Include file information (MIME type and description) into notice @@ -914,7 +1166,7 @@ 2.2-100 | 2014-01-20 11:54:58 -0800 * Fix caching of recently validated SSL certifcates. (Justin Azoff) - + 2.2-98 | 2014-01-20 11:50:32 -0800 * For notice suppresion, instead of storing the entire notice in @@ -942,7 +1194,7 @@ the city database instead of just the former. (Jon Siwek) * Broxygen init fixes. Addresses BIT-1110. (Jon Siwek) - + - Don't check mtime of bro binary if BRO_DISABLE_BROXYGEN env var set. - Fix failure to locate bro binary if invoking from a relative @@ -955,7 +1207,7 @@ len field being set to zero. 
(Seth Hall) * Canonify output of a unit test. (Jon Siwek) - + * A set of documentation updates. (Daniel Thayer) - Fix typo in Bro 2.2 NEWS on string indexing. @@ -1002,9 +1254,9 @@ (Jon Siwek) * Close signature files after done parsing. (Jon Siwek) - + * Fix unlikely null ptr deref in broxygen::Manager. (Jon Siwek) - + * FreeBSD build fix addendum: unintended variable shadowing. (Jon Siwek) @@ -1023,7 +1275,7 @@ were cleaned up. Addresses BIT-1103. (Bernhard Amann) * Minor Broxygen improvements. Addresses BIT-1098. (Jon Siwek) - + 2.2-51 | 2013-12-05 07:53:37 -0800 * Improve a unit test involving 'when' conditionals. (Jon Siwek) @@ -1048,7 +1300,7 @@ 2.2-44 | 2013-12-04 12:41:51 -0800 * Fix string slice notation. Addresses BIT-1097. (Jon Siwek) - + Slice ranges were not correctly determined for negative indices and also off by one in general (included one more element at the end of the substring than what actually matched the index range). @@ -1103,7 +1355,7 @@ 2.2-11 | 2013-12-03 10:56:28 -0800 * Unit test for broccoli vector support. (Jon Siwek) - + * Changed ordering of Bro type tag enum, which was out of sync. (Jon Siwek) @@ -1115,14 +1367,14 @@ 2.2-6 | 2013-11-15 07:05:15 -0800 * Make "install-example-configs" target use DESTDIR. (Jon Siwek) - + 2.2-5 | 2013-11-11 13:47:54 -0800 * Fix the irc_reply event for certain server message types. (Seth Hall) * Fixed Segmentation fault in SQLite Writer. (Jon Crussell) - + 2.2 | 2013-11-07 10:25:50 -0800 * Release 2.2. 
diff --git a/CMakeLists.txt b/CMakeLists.txt index 77aac6c611..22d63a89d5 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -127,33 +127,6 @@ if (GOOGLEPERFTOOLS_FOUND) endif () endif () -set(USE_DATASERIES false) -find_package(Lintel) -find_package(DataSeries) -find_package(LibXML2) - -if (NOT DISABLE_DATASERIES AND - LINTEL_FOUND AND DATASERIES_FOUND AND LIBXML2_FOUND) - set(USE_DATASERIES true) - include_directories(BEFORE ${Lintel_INCLUDE_DIR}) - include_directories(BEFORE ${DataSeries_INCLUDE_DIR}) - include_directories(BEFORE ${LibXML2_INCLUDE_DIR}) - list(APPEND OPTLIBS ${Lintel_LIBRARIES}) - list(APPEND OPTLIBS ${DataSeries_LIBRARIES}) - list(APPEND OPTLIBS ${LibXML2_LIBRARIES}) -endif() - -set(USE_ELASTICSEARCH false) -set(USE_CURL false) -find_package(LibCURL) - -if (NOT DISABLE_ELASTICSEARCH AND LIBCURL_FOUND) - set(USE_ELASTICSEARCH true) - set(USE_CURL true) - include_directories(BEFORE ${LibCURL_INCLUDE_DIR}) - list(APPEND OPTLIBS ${LibCURL_LIBRARIES}) -endif() - if (ENABLE_PERFTOOLS_DEBUG OR ENABLE_PERFTOOLS) # Just a no op to prevent CMake from complaining about manually-specified # ENABLE_PERFTOOLS_DEBUG or ENABLE_PERFTOOLS not being used if google @@ -175,6 +148,8 @@ set(brodeps include(TestBigEndian) test_big_endian(WORDS_BIGENDIAN) +include(CheckSymbolExists) +check_symbol_exists(htonll arpa/inet.h HAVE_BYTEORDER_64) include(OSSpecific) include(CheckTypes) @@ -252,10 +227,6 @@ message( "\n tcmalloc: ${USE_PERFTOOLS_TCMALLOC}" "\n debugging: ${USE_PERFTOOLS_DEBUG}" "\njemalloc: ${ENABLE_JEMALLOC}" - "\ncURL: ${USE_CURL}" - "\n" - "\nDataSeries: ${USE_DATASERIES}" - "\nElasticSearch: ${USE_ELASTICSEARCH}" "\n" "\n================================================================\n" ) diff --git a/Makefile b/Makefile index 2b8e66503b..49d9a6173c 100644 --- a/Makefile +++ b/Makefile @@ -56,6 +56,7 @@ test-all: test test -d aux/broctl && ( cd aux/broctl && make test ) test -d aux/btest && ( cd aux/btest && make test ) test -d aux/bro-aux && ( cd 
aux/bro-aux && make test ) + test -d aux/plugins && ( cd aux/plugins && make test-all ) configured: @test -d $(BUILD) || ( echo "Error: No build/ directory found. Did you run configure?" && exit 1 ) diff --git a/NEWS b/NEWS index f06115d4ea..6da13833c3 100644 --- a/NEWS +++ b/NEWS @@ -22,7 +22,7 @@ New Functionality plugin can furthermore hook into Bro's processing a number of places to add custom logic. - See http://www.bro.org/sphinx-git/devel/plugins.html for more + See https://www.bro.org/sphinx-git/devel/plugins.html for more information on writing plugins. Changed Functionality diff --git a/VERSION b/VERSION index e247bc816b..c0e0aba98f 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.3-96 +2.3-238 diff --git a/aux/binpac b/aux/binpac index 30c156d879..3a4684801a 160000 --- a/aux/binpac +++ b/aux/binpac @@ -1 +1 @@ -Subproject commit 30c156d879f6303f15ebf8e59989d8a42d882bdf +Subproject commit 3a4684801aafa0558383199e9abd711650b53af9 diff --git a/aux/bro-aux b/aux/bro-aux index bdb83e43ce..95afe42e74 160000 --- a/aux/bro-aux +++ b/aux/bro-aux @@ -1 +1 @@ -Subproject commit bdb83e43ce29250b32033e96c3054c486cbee1ef +Subproject commit 95afe42e7474113a16cb2cb09ebdf8b552c59744 diff --git a/aux/broccoli b/aux/broccoli index 07cfcc76fb..33d0ed4a54 160000 --- a/aux/broccoli +++ b/aux/broccoli @@ -1 +1 @@ -Subproject commit 07cfcc76fb08365b545bd3f412c3f6e6c92824e9 +Subproject commit 33d0ed4a54a6ecf08a0b5fe18831aa413b437066 diff --git a/aux/broctl b/aux/broctl index 2606a95c9d..2f808bc854 160000 --- a/aux/broctl +++ b/aux/broctl @@ -1 +1 @@ -Subproject commit 2606a95c9dcbc83bd863c2981ce7189e5d58697b +Subproject commit 2f808bc8541378b1a4953cca02c58c43945d154f diff --git a/aux/plugins b/aux/plugins new file mode 160000 index 0000000000..ad600b5bdc --- /dev/null +++ b/aux/plugins @@ -0,0 +1 @@ +Subproject commit ad600b5bdcd56a2723e323c0f2c8e1708956ca4f diff --git a/cmake b/cmake index f2e8ba6b90..03de0cc467 160000 --- a/cmake +++ b/cmake @@ -1 +1 @@ -Subproject commit 
f2e8ba6b90b3a2da9f1f77c55d0e718c25376bbb +Subproject commit 03de0cc467d2334dcb851eddd843d59fef217909 diff --git a/config.h.in b/config.h.in index d3889a2d90..755a9eee98 100644 --- a/config.h.in +++ b/config.h.in @@ -129,6 +129,9 @@ /* whether words are stored with the most significant byte first */ #cmakedefine WORDS_BIGENDIAN +/* whether htonll/ntohll is defined in */ +#cmakedefine HAVE_BYTEORDER_64 + /* ultrix can't hack const */ #cmakedefine NEED_ULTRIX_CONST_HACK #ifdef NEED_ULTRIX_CONST_HACK diff --git a/configure b/configure index 35095c333a..5747586db8 100755 --- a/configure +++ b/configure @@ -39,8 +39,6 @@ Usage: $0 [OPTION]... [VAR=VALUE]... --disable-auxtools don't build or install auxiliary tools --disable-perftools don't try to build with Google Perftools --disable-python don't try to build python bindings for broccoli - --disable-dataseries don't use the optional DataSeries log writer - --disable-elasticsearch don't use the optional ElasticSearch log writer Required Packages in Non-Standard Locations: --with-openssl=PATH path to OpenSSL install root @@ -62,9 +60,6 @@ Usage: $0 [OPTION]... [VAR=VALUE]... 
--with-ruby-lib=PATH path to ruby library --with-ruby-inc=PATH path to ruby headers --with-swig=PATH path to SWIG executable - --with-dataseries=PATH path to DataSeries and Lintel libraries - --with-xml2=PATH path to libxml2 installation (for DataSeries) - --with-curl=PATH path to libcurl install root (for ElasticSearch) Packaging Options (for developers): --binary-package toggle special logic for binary packaging @@ -183,12 +178,6 @@ while [ $# -ne 0 ]; do --enable-ruby) append_cache_entry DISABLE_RUBY_BINDINGS BOOL false ;; - --disable-dataseries) - append_cache_entry DISABLE_DATASERIES BOOL true - ;; - --disable-elasticsearch) - append_cache_entry DISABLE_ELASTICSEARCH BOOL true - ;; --with-openssl=*) append_cache_entry OpenSSL_ROOT_DIR PATH $optarg ;; @@ -243,16 +232,6 @@ while [ $# -ne 0 ]; do --with-swig=*) append_cache_entry SWIG_EXECUTABLE PATH $optarg ;; - --with-dataseries=*) - append_cache_entry DataSeries_ROOT_DIR PATH $optarg - append_cache_entry Lintel_ROOT_DIR PATH $optarg - ;; - --with-xml2=*) - append_cache_entry LibXML2_ROOT_DIR PATH $optarg - ;; - --with-curl=*) - append_cache_entry LibCURL_ROOT_DIR PATH $optarg - ;; --binary-package) append_cache_entry BINARY_PACKAGING_MODE BOOL true ;; diff --git a/doc/_templates/layout.html b/doc/_templates/layout.html index 2f8ea02aff..3df56a12ff 100644 --- a/doc/_templates/layout.html +++ b/doc/_templates/layout.html @@ -10,7 +10,7 @@ {% endblock %} {% block header %} - {% endblock %} @@ -108,6 +108,6 @@ {% endblock %} {% block footer %} - {% endblock %} diff --git a/doc/devel/plugins.rst b/doc/devel/plugins.rst index cc34c399d2..c703345891 100644 --- a/doc/devel/plugins.rst +++ b/doc/devel/plugins.rst @@ -17,11 +17,11 @@ functionality to Bro: - File analyzers. - - Packet sources and packet dumpers. TODO: Not yet. + - Packet sources and packet dumpers. - - Logging framework backends. TODO: Not yet. + - Logging framework backends. - - Input framework readers. TODO: Not yet. + - Input framework readers. 
A plugin's functionality is available to the user just as if Bro had the corresponding code built-in. Indeed, internally many of Bro's @@ -57,10 +57,10 @@ called ``Demo::Rot13``. The ``init-plugin`` script puts a number of files in place. The full layout is described later. For now, all we need is -``src/functions.bif``. It's initially empty, but we'll add our new bif +``src/rot13.bif``. It's initially empty, but we'll add our new bif there as follows:: - # cat scripts/functions.bif + # cat src/rot13.bif module CaesarCipher; function rot13%(s: string%) : string @@ -73,23 +73,25 @@ there as follows:: *p = (*p - b + 13) % 26 + b; } - return new StringVal(new BroString(1, rot13, strlen(rot13))); + BroString* bs = new BroString(1, reinterpret_cast(rot13), + strlen(rot13)); + return new StringVal(bs); %} The syntax of this file is just like any other ``*.bif`` file; we won't go into it here. Now we can already compile our plugin, we just need to tell the -Makefile put in place by ``init-plugin`` where the Bro source tree is -located (Bro needs to have been built there first):: +configure script put in place by ``init-plugin`` where the Bro source +tree is located (Bro needs to have been built there first):: - # make BRO=/path/to/bro/dist + # ./configure --bro-dist=/path/to/bro/dist && make [... cmake output ...] Now our ``rot13-plugin`` directory has everything that it needs for Bro to recognize it as a dynamic plugin. Once we point Bro to it, it will pull it in automatically, as we can check with the ``-N`` -option: +option:: # export BRO_PLUGIN_PATH=/path/to/rot13-plugin # bro -N @@ -100,7 +102,7 @@ option: That looks quite good, except for the dummy description that we should replace with something nicer so that users will know what our plugin is about. We do this by editing the ``config.description`` line in -``src/Plugin.cc``, like this: +``src/Plugin.cc``, like this:: [...] plugin::Configuration Configure() @@ -193,7 +195,7 @@ directory. 
A directory with the plugin's custom Bro scripts. When the plugin gets activated, this directory will be automatically added to ``BROPATH``, so that any scripts/modules inside can be - ``@load``ed. + "@load"ed. ``scripts``/__load__.bro A Bro script that will be loaded immediately when the plugin gets @@ -263,23 +265,25 @@ plugins to unconditionally activate, even in bare mode. activated plugins. Note that plugins compiled statically into Bro are always activated, and hence show up as such even in bare mode. -Plugin Component -================ +Plugin Components +================= -The following gives additional information about providing individual -types of functionality via plugins. Note that a single plugin can -provide more than one type. For example, a plugin could provide -multiple protocol analyzers at once; or both a logging backend and -input reader at the same time. +The following subsections detail providing individual types of +functionality via plugins. Note that a single plugin can provide more +than one component type. For example, a plugin could provide multiple +protocol analyzers at once; or both a logging backend and input reader +at the same time. -We now walk briefly through the specifics of providing a specific type -of functionality (a *component*) through a plugin. We'll focus on -their interfaces to the plugin system, rather than specifics on -writing the corresponding logic (usually the best way to get going on -that is to start with an existing plugin providing a corresponding -component and adapt that). We'll also point out how the CMake -infrastructure put in place by the ``init-plugin`` helper script ties -the various pieces together. +.. todo:: + + These subsections are mostly missing right now, as much of their + content isn't actually plugin-specific, but concerns generally + writing such functionality for Bro. 
The best way to get started + right now is to look at existing code implementing similar + functionality, either as a plugin or inside Bro proper. Also, for + each component type there's a unit test in + ``testing/btest/plugins`` creating a basic plugin skeleton with a + corresponding component. Bro Scripts ----------- @@ -313,22 +317,22 @@ TODO. Logging Writer -------------- -Not yet available as plugins. +TODO. Input Reader ------------ -Not yet available as plugins. +TODO. Packet Sources -------------- -Not yet available as plugins. +TODO. Packet Dumpers -------------- -Not yet available as plugins. +TODO. Hooks ===== @@ -410,25 +414,32 @@ Run the test-suite:: Debugging Plugins ================= -Plugins can use Bro's standard debug logger by using the -``PLUGIN_DBG_LOG(, )`` macro (defined in -``DebugLogger.h``), where ```` is the ``Plugin`` instance and -```` are printf-style arguments, just as with Bro's standard -debuggging macros. +If your plugin isn't loading as expected, Bro's debugging facilities +can help to illuminate what's going on. To enable, recompile Bro +with debugging support (``./configure --enable-debug``), and +afterwards rebuild your plugin as well. If you then run Bro with ``-B +plugins``, it will produce a file ``debug.log`` that records details +about the process for searching, loading, and activating plugins. -At runtime, one then activates a plugin's debugging output with ``-B -plugin-``, where ```` is the name of the plugin as -returned by its ``Configure()`` method, yet with the -namespace-separator ``::`` replaced with a simple dash. Example: If -the plugin is called ``Bro::Demo``, use ``-B plugin-Bro-Demo``. As -usual, the debugging output will be recorded to ``debug.log`` if Bro's -compiled in debug mode. 
+To generate your own debugging output from inside your plugin, you can +add a custom debug stream by using the ``PLUGIN_DBG_LOG(, +)`` macro (defined in ``DebugLogger.h``), where ```` is +the ``Plugin`` instance and ```` are printf-style arguments, +just as with Bro's standard debugging macros (grep for ``DBG_LOG`` in +Bro's ``src/`` to see examples). At runtime, you can then activate +your plugin's debugging output with ``-B plugin-``, where +```` is the name of the plugin as returned by its +``Configure()`` method, yet with the namespace-separator ``::`` +replaced with a simple dash. Example: If the plugin is called +``Bro::Demo``, use ``-B plugin-Bro-Demo``. As usual, the debugging +output will be recorded to ``debug.log`` if Bro's compiled in debug +mode. Documenting Plugins =================== -..todo:: +.. todo:: Integrate all this with Broxygen. diff --git a/doc/ext/bro.py b/doc/ext/bro.py index 9295c63312..1df4a518c2 100644 --- a/doc/ext/bro.py +++ b/doc/ext/bro.py @@ -176,6 +176,10 @@ class BroIdentifier(BroGeneric): def get_index_text(self, objectname, name): return name +class BroKeyword(BroGeneric): + def get_index_text(self, objectname, name): + return name + class BroAttribute(BroGeneric): def get_index_text(self, objectname, name): return _('%s (attribute)') % (name) @@ -213,6 +217,7 @@ class BroDomain(Domain): 'type': ObjType(l_('type'), 'type'), 'namespace': ObjType(l_('namespace'), 'namespace'), 'id': ObjType(l_('id'), 'id'), + 'keyword': ObjType(l_('keyword'), 'keyword'), 'enum': ObjType(l_('enum'), 'enum'), 'attr': ObjType(l_('attr'), 'attr'), } @@ -221,6 +226,7 @@ class BroDomain(Domain): 'type': BroGeneric, 'namespace': BroNamespace, 'id': BroIdentifier, + 'keyword': BroKeyword, 'enum': BroEnum, 'attr': BroAttribute, } @@ -229,6 +235,7 @@ class BroDomain(Domain): 'type': XRefRole(), 'namespace': XRefRole(), 'id': XRefRole(), + 'keyword': XRefRole(), 'enum': XRefRole(), 'attr': XRefRole(), 'see': XRefRole(), diff --git 
a/doc/frameworks/logging-dataseries.rst b/doc/frameworks/logging-dataseries.rst deleted file mode 100644 index cc479eae76..0000000000 --- a/doc/frameworks/logging-dataseries.rst +++ /dev/null @@ -1,186 +0,0 @@ - -============================= -Binary Output with DataSeries -============================= - -.. rst-class:: opening - - Bro's default ASCII log format is not exactly the most efficient - way for storing and searching large volumes of data. An an - alternative, Bro comes with experimental support for `DataSeries - `_ - output, an efficient binary format for recording structured bulk - data. DataSeries is developed and maintained at HP Labs. - -.. contents:: - -Installing DataSeries ---------------------- - -To use DataSeries, its libraries must be available at compile-time, -along with the supporting *Lintel* package. Generally, both are -distributed on `HP Labs' web site -`_. Currently, however, you need -to use recent development versions for both packages, which you can -download from github like this:: - - git clone http://github.com/dataseries/Lintel - git clone http://github.com/dataseries/DataSeries - -To build and install the two into ````, do:: - - ( cd Lintel && mkdir build && cd build && cmake -DCMAKE_INSTALL_PREFIX= .. && make && make install ) - ( cd DataSeries && mkdir build && cd build && cmake -DCMAKE_INSTALL_PREFIX= .. && make && make install ) - -Please refer to the packages' documentation for more information about -the installation process. In particular, there's more information on -required and optional `dependencies for Lintel -`_ -and `dependencies for DataSeries -`_. -For users on RedHat-style systems, you'll need the following:: - - yum install libxml2-devel boost-devel - -Compiling Bro with DataSeries Support -------------------------------------- - -Once you have installed DataSeries, Bro's ``configure`` should pick it -up automatically as long as it finds it in a standard system location. 
-Alternatively, you can specify the DataSeries installation prefix -manually with ``--with-dataseries=``. Keep an eye on -``configure``'s summary output, if it looks like the following, Bro -found DataSeries and will compile in the support:: - - # ./configure --with-dataseries=/usr/local - [...] - ====================| Bro Build Summary |===================== - [...] - DataSeries: true - [...] - ================================================================ - -Activating DataSeries ---------------------- - -The direct way to use DataSeries is to switch *all* log files over to -the binary format. To do that, just add ``redef -Log::default_writer=Log::WRITER_DATASERIES;`` to your ``local.bro``. -For testing, you can also just pass that on the command line:: - - bro -r trace.pcap Log::default_writer=Log::WRITER_DATASERIES - -With that, Bro will now write all its output into DataSeries files -``*.ds``. You can inspect these using DataSeries's set of command line -tools, which its installation process installs into ``/bin``. -For example, to convert a file back into an ASCII representation:: - - $ ds2txt conn.log - [... We skip a bunch of metadata here ...] 
- ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes - 1300475167.096535 CRCC5OdDlXe 141.142.220.202 5353 224.0.0.251 5353 udp dns 0.000000 0 0 S0 F 0 D 1 73 0 0 - 1300475167.097012 o7XBsfvo3U1 fe80::217:f2ff:fed7:cf65 5353 ff02::fb 5353 udp 0.000000 0 0 S0 F 0 D 1 199 0 0 - 1300475167.099816 pXPi1kPMgxb 141.142.220.50 5353 224.0.0.251 5353 udp 0.000000 0 0 S0 F 0 D 1 179 0 0 - 1300475168.853899 R7sOc16woCj 141.142.220.118 43927 141.142.2.2 53 udp dns 0.000435 38 89 SF F 0 Dd 1 66 1 117 - 1300475168.854378 Z6dfHVmt0X7 141.142.220.118 37676 141.142.2.2 53 udp dns 0.000420 52 99 SF F 0 Dd 1 80 1 127 - 1300475168.854837 k6T92WxgNAh 141.142.220.118 40526 141.142.2.2 53 udp dns 0.000392 38 183 SF F 0 Dd 1 66 1 211 - [...] - -(``--skip-all`` suppresses the metadata.) - -Note that the ASCII conversion is *not* equivalent to Bro's default -output format. - -You can also switch only individual files over to DataSeries by adding -code like this to your ``local.bro``: - -.. code:: bro - - event bro_init() - { - local f = Log::get_filter(Conn::LOG, "default"); # Get default filter for connection log. - f$writer = Log::WRITER_DATASERIES; # Change writer type. - Log::add_filter(Conn::LOG, f); # Replace filter with adapted version. - } - -Bro's DataSeries writer comes with a few tuning options, see -:doc:`/scripts/base/frameworks/logging/writers/dataseries.bro`. - -Working with DataSeries -======================= - -Here are a few examples of using DataSeries command line tools to work -with the output files. 
- -* Printing CSV:: - - $ ds2txt --csv conn.log - ts,uid,id.orig_h,id.orig_p,id.resp_h,id.resp_p,proto,service,duration,orig_bytes,resp_bytes,conn_state,local_orig,missed_bytes,history,orig_pkts,orig_ip_bytes,resp_pkts,resp_ip_bytes - 1258790493.773208,ZTtgbHvf4s3,192.168.1.104,137,192.168.1.255,137,udp,dns,3.748891,350,0,S0,F,0,D,7,546,0,0 - 1258790451.402091,pOY6Rw7lhUd,192.168.1.106,138,192.168.1.255,138,udp,,0.000000,0,0,S0,F,0,D,1,229,0,0 - 1258790493.787448,pn5IiEslca9,192.168.1.104,138,192.168.1.255,138,udp,,2.243339,348,0,S0,F,0,D,2,404,0,0 - 1258790615.268111,D9slyIu3hFj,192.168.1.106,137,192.168.1.255,137,udp,dns,3.764626,350,0,S0,F,0,D,7,546,0,0 - [...] - - Add ``--separator=X`` to set a different separator. - -* Extracting a subset of columns:: - - $ ds2txt --select '*' ts,id.resp_h,id.resp_p --skip-all conn.log - 1258790493.773208 192.168.1.255 137 - 1258790451.402091 192.168.1.255 138 - 1258790493.787448 192.168.1.255 138 - 1258790615.268111 192.168.1.255 137 - 1258790615.289842 192.168.1.255 138 - [...] - -* Filtering rows:: - - $ ds2txt --where '*' 'duration > 5 && id.resp_p > 1024' --skip-all conn.ds - 1258790631.532888 V8mV5WLITu5 192.168.1.105 55890 239.255.255.250 1900 udp 15.004568 798 0 S0 F 0 D 6 966 0 0 - 1258792413.439596 tMcWVWQptvd 192.168.1.105 55890 239.255.255.250 1900 udp 15.004581 798 0 S0 F 0 D 6 966 0 0 - 1258794195.346127 cQwQMRdBrKa 192.168.1.105 55890 239.255.255.250 1900 udp 15.005071 798 0 S0 F 0 D 6 966 0 0 - 1258795977.253200 i8TEjhWd2W8 192.168.1.105 55890 239.255.255.250 1900 udp 15.004824 798 0 S0 F 0 D 6 966 0 0 - 1258797759.160217 MsLsBA8Ia49 192.168.1.105 55890 239.255.255.250 1900 udp 15.005078 798 0 S0 F 0 D 6 966 0 0 - 1258799541.068452 TsOxRWJRGwf 192.168.1.105 55890 239.255.255.250 1900 udp 15.004082 798 0 S0 F 0 D 6 966 0 0 - [...] 
- -* Calculate some statistics: - - Mean/stddev/min/max over a column:: - - $ dsstatgroupby '*' basic duration from conn.ds - # Begin DSStatGroupByModule - # processed 2159 rows, where clause eliminated 0 rows - # count(*), mean(duration), stddev, min, max - 2159, 42.7938, 1858.34, 0, 86370 - [...] - - Quantiles of total connection volume:: - - $ dsstatgroupby '*' quantile 'orig_bytes + resp_bytes' from conn.ds - [...] - 2159 data points, mean 24616 +- 343295 [0,1.26615e+07] - quantiles about every 216 data points: - 10%: 0, 124, 317, 348, 350, 350, 601, 798, 1469 - tails: 90%: 1469, 95%: 7302, 99%: 242629, 99.5%: 1226262 - [...] - -The ``man`` pages for these tools show further options, and their -``-h`` option gives some more information (either can be a bit cryptic -unfortunately though). - -Deficiencies ------------- - -Due to limitations of the DataSeries format, one cannot inspect its -files before they have been fully written. In other words, when using -DataSeries, it's currently not possible to inspect the live log -files inside the spool directory before they are rotated to their -final location. It seems that this could be fixed with some effort, -and we will work with DataSeries development team on that if the -format gains traction among Bro users. - -Likewise, we're considering writing custom command line tools for -interacting with DataSeries files, making that a bit more convenient -than what the standard utilities provide. diff --git a/doc/frameworks/logging-elasticsearch.rst b/doc/frameworks/logging-elasticsearch.rst deleted file mode 100644 index 3f1eac859d..0000000000 --- a/doc/frameworks/logging-elasticsearch.rst +++ /dev/null @@ -1,89 +0,0 @@ - -========================================= -Indexed Logging Output with ElasticSearch -========================================= - -.. rst-class:: opening - - Bro's default ASCII log format is not exactly the most efficient - way for searching large volumes of data. 
ElasticSearch - is a new data storage technology for dealing with tons of data. - It's also a search engine built on top of Apache's Lucene - project. It scales very well, both for distributed indexing and - distributed searching. - -.. contents:: - -Warning -------- - -This writer plugin is still in testing and is not yet recommended for -production use! The approach to how logs are handled in the plugin is "fire -and forget" at this time, there is no error handling if the server fails to -respond successfully to the insertion request. - -Installing ElasticSearch ------------------------- - -Download the latest version from: http://www.elasticsearch.org/download/. -Once extracted, start ElasticSearch with:: - -# ./bin/elasticsearch - -For more detailed information, refer to the ElasticSearch installation -documentation: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/setup.html - -Compiling Bro with ElasticSearch Support ----------------------------------------- - -First, ensure that you have libcurl installed then run configure:: - - # ./configure - [...] - ====================| Bro Build Summary |===================== - [...] - cURL: true - [...] - ElasticSearch: true - [...] - ================================================================ - -Activating ElasticSearch ------------------------- - -The easiest way to enable ElasticSearch output is to load the -tuning/logs-to-elasticsearch.bro script. If you are using BroControl, -the following line in local.bro will enable it: - -.. console:: - - @load tuning/logs-to-elasticsearch - -With that, Bro will now write most of its logs into ElasticSearch in addition -to maintaining the Ascii logs like it would do by default. That script has -some tunable options for choosing which logs to send to ElasticSearch, refer -to the autogenerated script documentation for those options. 
- -There is an interface being written specifically to integrate with the data -that Bro outputs into ElasticSearch named Brownian. It can be found here:: - - https://github.com/grigorescu/Brownian - -Tuning ------- - -A common problem encountered with ElasticSearch is too many files being held -open. The ElasticSearch website has some suggestions on how to increase the -open file limit. - - - http://www.elasticsearch.org/tutorials/too-many-open-files/ - -TODO ----- - -Lots. - -- Perform multicast discovery for server. -- Better error detection. -- Better defaults (don't index loaded-plugins, for instance). -- diff --git a/doc/frameworks/logging.rst b/doc/frameworks/logging.rst index 47d3338e8a..c64ab02489 100644 --- a/doc/frameworks/logging.rst +++ b/doc/frameworks/logging.rst @@ -38,7 +38,7 @@ Bro's logging interface is built around three main abstractions: Writers A writer defines the actual output format for the information being logged. At the moment, Bro comes with only one type of - writer, which produces tab separated ASCII files. In the + writer, which produces tab separated ASCII files. In the future we will add further writers, like for binary output and direct logging into a database. @@ -98,7 +98,7 @@ Note the fields that are set for the filter: ``include`` A set limiting the fields to the ones given. The names correspond to those in the :bro:type:`Conn::Info` record, with - sub-records unrolled by concatenating fields (separated with + sub-records unrolled by concatenating fields (separated with dots). Using the code above, you will now get a new log file ``origs.log`` @@ -155,7 +155,7 @@ that returns the desired path: { local filter: Log::Filter = [$name="conn-split", $path_func=split_log, $include=set("ts", "id.orig_h")]; Log::add_filter(Conn::LOG, filter); - } + } Running this will now produce two files, ``local.log`` and ``remote.log``, with the corresponding entries. 
One could extend this @@ -263,7 +263,7 @@ specific destination exceeds a certain duration: .. code:: bro redef enum Notice::Type += { - ## Indicates that a connection remained established longer + ## Indicates that a connection remained established longer ## than 5 minutes. Long_Conn_Found }; @@ -271,8 +271,8 @@ specific destination exceeds a certain duration: event Conn::log_conn(rec: Conn::Info) { if ( rec$duration > 5mins ) - NOTICE([$note=Long_Conn_Found, - $msg=fmt("unusually long conn to %s", rec$id$resp_h), + NOTICE([$note=Long_Conn_Found, + $msg=fmt("unusually long conn to %s", rec$id$resp_h), $id=rec$id]); } @@ -335,11 +335,11 @@ example for the ``Foo`` module: # Define a hook event. By convention, this is called # "log_". global log_foo: event(rec: Info); - + } # This event should be handled at a higher priority so that when - # users modify your stream later and they do it at priority 0, + # users modify your stream later and they do it at priority 0, # their code runs after this. event bro_init() &priority=5 { @@ -356,7 +356,7 @@ it easily accessible across event handlers: foo: Info &optional; } -Now you can use the :bro:id:`Log::write` method to output log records and +Now you can use the :bro:id:`Log::write` method to output log records and save the logged ``Foo::Info`` record into the connection record: .. code:: bro @@ -380,11 +380,11 @@ uncommon to need to delete that data before the end of the connection. Other Writers ------------- -Bro supports the following output formats other than ASCII: +Bro supports the following built-in output formats other than ASCII: .. toctree:: :maxdepth: 1 - logging-dataseries - logging-elasticsearch logging-input-sqlite + +Further formats are available as external plugins. diff --git a/doc/index.rst b/doc/index.rst index 6161ee1ff8..22fb8cbe1a 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -45,7 +45,13 @@ Reference Section script-reference/index.rst components/index.rst -.. +Development +=========== + +.. 
toctree:: + :maxdepth: 2 + + devel/plugins.rst * :ref:`General Index ` * :ref:`search` diff --git a/doc/install/install.rst b/doc/install/install.rst index 9a258773ce..0052acafb0 100644 --- a/doc/install/install.rst +++ b/doc/install/install.rst @@ -180,7 +180,7 @@ automatically. Finally, use ``make install-aux`` to install some of the other programs that are in the ``aux/bro-aux`` directory. OpenBSD users, please see our `FAQ -`_ if you are having +`_ if you are having problems installing Bro. Finally, if you want to build the Bro documentation (not required, because diff --git a/doc/logs/index.rst b/doc/logs/index.rst index 7c7006054f..a8fb951c80 100644 --- a/doc/logs/index.rst +++ b/doc/logs/index.rst @@ -111,7 +111,9 @@ default, including: such "crud" that is usually not worth following up on. As you can see, some log files are specific to a particular protocol, -while others aggregate information across different types of activity. +while others aggregate information across different types of activity. +For a complete list of log files and a description of its purpose, +see :doc:`List of Log Files <../script-reference/list-of-log-files>`. .. _bro-cut: @@ -250,44 +252,3 @@ protocol, it can have multiple ``GET``/``POST``/etc requests in a stream and Bro is able to extract and track that information for you, giving you an in-depth and structured view into HTTP traffic on your network. - ------------------------ -Common Log Files ------------------------ -As a monitoring tool, Bro records a detailed view of the traffic inspected -and the events generated in a series of relevant log files. These files can -later be reviewed for monitoring, auditing and troubleshooting purposes. - -In this section we present a brief explanation of the most commonly used log -files generated by Bro including links to descriptions of some of the fields -for each log type. 
- -+-----------------+---------------------------------------+------------------------------+ -| Log File | Description | Field Descriptions | -+=================+=======================================+==============================+ -| http.log | Shows all HTTP requests and replies | :bro:type:`HTTP::Info` | -+-----------------+---------------------------------------+------------------------------+ -| ftp.log | Records FTP activity | :bro:type:`FTP::Info` | -+-----------------+---------------------------------------+------------------------------+ -| ssl.log | Records SSL sessions including | :bro:type:`SSL::Info` | -| | certificates used | | -+-----------------+---------------------------------------+------------------------------+ -| known_certs.log | Includes SSL certificates used | :bro:type:`Known::CertsInfo` | -+-----------------+---------------------------------------+------------------------------+ -| smtp.log | Summarizes SMTP traffic on a network | :bro:type:`SMTP::Info` | -+-----------------+---------------------------------------+------------------------------+ -| dns.log | Shows all DNS activity on a network | :bro:type:`DNS::Info` | -+-----------------+---------------------------------------+------------------------------+ -| conn.log | Records all connections seen by Bro | :bro:type:`Conn::Info` | -+-----------------+---------------------------------------+------------------------------+ -| dpd.log | Shows network activity on | :bro:type:`DPD::Info` | -| | non-standard ports | | -+-----------------+---------------------------------------+------------------------------+ -| files.log | Records information about all files | :bro:type:`Files::Info` | -| | transmitted over the network | | -+-----------------+---------------------------------------+------------------------------+ -| weird.log | Records unexpected protocol-level | :bro:type:`Weird::Info` | -| | activity | | 
-+-----------------+---------------------------------------+------------------------------+ - - diff --git a/doc/quickstart/index.rst b/doc/quickstart/index.rst index 173373c769..bb642ee75a 100644 --- a/doc/quickstart/index.rst +++ b/doc/quickstart/index.rst @@ -1,5 +1,5 @@ -.. _FAQ: http://www.bro.org/documentation/faq.html +.. _FAQ: //www.bro.org/documentation/faq.html .. _quickstart: diff --git a/doc/script-reference/attributes.rst b/doc/script-reference/attributes.rst new file mode 100644 index 0000000000..5680a034ff --- /dev/null +++ b/doc/script-reference/attributes.rst @@ -0,0 +1,232 @@ +Attributes +========== + +The Bro scripting language supports the following attributes. + ++-----------------------------+-----------------------------------------------+ +| Name | Description | ++=============================+===============================================+ +| :bro:attr:`&redef` |Redefine a global constant or extend a type. | ++-----------------------------+-----------------------------------------------+ +| :bro:attr:`&priority` |Specify priority for event handler or hook. | ++-----------------------------+-----------------------------------------------+ +| :bro:attr:`&log` |Mark a record field as to be written to a log. | ++-----------------------------+-----------------------------------------------+ +| :bro:attr:`&optional` |Allow a record field value to be missing. | ++-----------------------------+-----------------------------------------------+ +| :bro:attr:`&default` |Specify a default value. | ++-----------------------------+-----------------------------------------------+ +| :bro:attr:`&add_func` |Specify a function to call for each "redef +=".| ++-----------------------------+-----------------------------------------------+ +| :bro:attr:`&delete_func` |Same as "&add_func", except for "redef -=". 
| ++-----------------------------+-----------------------------------------------+ +| :bro:attr:`&expire_func` |Specify a function to call when container | +| |element expires. | ++-----------------------------+-----------------------------------------------+ +| :bro:attr:`&read_expire` |Specify a read timeout interval. | ++-----------------------------+-----------------------------------------------+ +| :bro:attr:`&write_expire` |Specify a write timeout interval. | ++-----------------------------+-----------------------------------------------+ +| :bro:attr:`&create_expire` |Specify a creation timeout interval. | ++-----------------------------+-----------------------------------------------+ +| :bro:attr:`&synchronized` |Synchronize a variable across nodes. | ++-----------------------------+-----------------------------------------------+ +| :bro:attr:`&persistent` |Make a variable persistent (written to disk). | ++-----------------------------+-----------------------------------------------+ +| :bro:attr:`&rotate_interval`|Rotate a file after specified interval. | ++-----------------------------+-----------------------------------------------+ +| :bro:attr:`&rotate_size` |Rotate a file after specified file size. | ++-----------------------------+-----------------------------------------------+ +| :bro:attr:`&encrypt` |Encrypt a file when writing to disk. | ++-----------------------------+-----------------------------------------------+ +| :bro:attr:`&raw_output` |Open file in raw mode (chars. are not escaped).| ++-----------------------------+-----------------------------------------------+ +| :bro:attr:`&mergeable` |Prefer set union for synchronized state. | ++-----------------------------+-----------------------------------------------+ +| :bro:attr:`&group` |Group event handlers to activate/deactivate. 
| ++-----------------------------+-----------------------------------------------+ +| :bro:attr:`&error_handler` |Used internally for reporter framework events. | ++-----------------------------+-----------------------------------------------+ +| :bro:attr:`&type_column` |Used by input framework for "port" type. | ++-----------------------------+-----------------------------------------------+ + +Here is a more detailed explanation of each attribute: + +.. bro:attr:: &redef + + Allows for redefinition of initial values of global objects declared as + constant. + + In this example, the constant (assuming it is global) can be redefined + with a :bro:keyword:`redef` at some later point:: + + const clever = T &redef; + +.. bro:attr:: &priority + + Specifies the execution priority (as a signed integer) of a hook or + event handler. Higher values are executed before lower ones. The + default value is 0. Example:: + + event bro_init() &priority=10 + { + print "high priority"; + } + +.. bro:attr:: &log + + Writes a :bro:type:`record` field to the associated log stream. + +.. bro:attr:: &optional + + Allows a record field value to be missing (i.e., neither initialized nor + ever assigned a value). + + In this example, the record could be instantiated with either + "myrec($a=127.0.0.1)" or "myrec($a=127.0.0.1, $b=80/tcp)":: + + type myrec: record { a: addr; b: port &optional; }; + + The ``?$`` operator can be used to check if a record field has a value or + not (it returns a ``bool`` value of ``T`` if the field has a value, + and ``F`` if not). + +.. bro:attr:: &default + + Specifies a default value for a record field, container element, or a + function/hook/event parameter. 
+ + In this example, the record could be instantiated with either + "myrec($a=5, $c=3.14)" or "myrec($a=5, $b=53/udp, $c=3.14)":: + + type myrec: record { a: count; b: port &default=80/tcp; c: double; }; + + In this example, the table will return the string ``"foo"`` for any + attempted access to a non-existing index:: + + global mytable: table[count] of string &default="foo"; + + When used with function/hook/event parameters, all of the parameters + with the "&default" attribute must come after all other parameters. + For example, the following function could be called either as "myfunc(5)" + or as "myfunc(5, 53/udp)":: + + function myfunc(a: count, b: port &default=80/tcp) + { + print a, b; + } + +.. bro:attr:: &add_func + + Can be applied to an identifier with &redef to specify a function to + be called any time a "redef += ..." declaration is parsed. The + function takes two arguments of the same type as the identifier, the first + being the old value of the variable and the second being the new + value given after the "+=" operator in the "redef" declaration. The + return value of the function will be the actual new value of the + variable after the "redef" declaration is parsed. + +.. bro:attr:: &delete_func + + Same as :bro:attr:`&add_func`, except for :bro:keyword:`redef` declarations + that use the "-=" operator. + +.. bro:attr:: &expire_func + + Called right before a container element expires. The function's + first parameter is of the same type of the container and the second + parameter the same type of the container's index. The return + value is an :bro:type:`interval` indicating the amount of additional + time to wait before expiring the container element at the given + index (which will trigger another execution of this function). + +.. bro:attr:: &read_expire + + Specifies a read expiration timeout for container elements. That is, + the element expires after the given amount of time since the last + time it has been read. 
Note that a write also counts as a read. + +.. bro:attr:: &write_expire + + Specifies a write expiration timeout for container elements. That + is, the element expires after the given amount of time since the + last time it has been written. + +.. bro:attr:: &create_expire + + Specifies a creation expiration timeout for container elements. That + is, the element expires after the given amount of time since it has + been inserted into the container, regardless of any reads or writes. + +.. bro:attr:: &synchronized + + Synchronizes variable accesses across nodes. The value of a + ``&synchronized`` variable is automatically propagated to all peers + when it changes. + +.. bro:attr:: &persistent + + Makes a variable persistent, i.e., its value is written to disk (per + default at shutdown time). + +.. bro:attr:: &rotate_interval + + Rotates a file after a specified interval. + +.. bro:attr:: &rotate_size + + Rotates a file after it has reached a given size in bytes. + +.. bro:attr:: &encrypt + + Encrypts files right before writing them to disk. + +.. bro:attr:: &raw_output + + Opens a file in raw mode, i.e., non-ASCII characters are not + escaped. + +.. bro:attr:: &mergeable + + Prefers merging sets on assignment for synchronized state. This + attribute is used in conjunction with :bro:attr:`&synchronized` + container types: when the same container is updated at two peers + with different values, the propagation of the state causes a race + condition, where the last update succeeds. This can cause + inconsistencies and can be avoided by unifying the two sets, rather + than merely overwriting the old value. + +.. bro:attr:: &group + + Groups event handlers such that those in the same group can be + jointly activated or deactivated. + +.. bro:attr:: &error_handler + + Internally set on the events that are associated with the reporter + framework: :bro:id:`reporter_info`, :bro:id:`reporter_warning`, and + :bro:id:`reporter_error`. 
It prevents any handlers of those events + from being able to generate reporter messages that go through any of + those events (i.e., it prevents an infinite event recursion). Instead, + such nested reporter messages are output to stderr. + +.. bro:attr:: &type_column + + Used by the input framework. It can be used on columns of type + :bro:type:`port` (such a column only contains the port number) and + specifies the name of an additional column in + the input file which specifies the protocol of the port (tcp/udp/icmp). + + In the following example, the input file would contain four columns + named "ip", "srcp", "proto", and "msg":: + + type Idx: record { + ip: addr; + }; + + + type Val: record { + srcp: port &type_column = "proto"; + msg: string; + }; + diff --git a/doc/script-reference/directives.rst b/doc/script-reference/directives.rst new file mode 100644 index 0000000000..f98f328191 --- /dev/null +++ b/doc/script-reference/directives.rst @@ -0,0 +1,173 @@ +Directives +========== + +The Bro scripting language supports a number of directives that can +affect which scripts will be loaded or which lines in a script will be +executed. Directives are evaluated before script execution begins. + +.. bro:keyword:: @DEBUG + + TODO + + +.. bro:keyword:: @DIR + + Expands to the directory pathname where the current script is located. + + Example:: + + print "Directory:", @DIR; + + +.. bro:keyword:: @FILENAME + + Expands to the filename of the current script. + + Example:: + + print "File:", @FILENAME; + +.. bro:keyword:: @load + + Loads the specified Bro script, specified as the relative pathname + of the file (relative to one of the directories in Bro's file search path). + If the Bro script filename ends with ".bro", then you don't need to + specify the file extension. The filename cannot contain any whitespace. 
+ + In this example, Bro will try to load a script + "policy/misc/capture-loss.bro" by looking in each directory in the file + search path (the file search path can be changed by setting the BROPATH + environment variable):: + + @load policy/misc/capture-loss + + If you specify the name of a directory instead of a filename, then + Bro will try to load a file in that directory called "__load__.bro" + (presumably that file will contain additional "@load" directives). + + In this example, Bro will try to load a file "tuning/defaults/__load__.bro" + by looking in each directory in the file search path:: + + @load tuning/defaults + + The purpose of this directive is to ensure that all script dependencies + are satisfied, and to avoid having to list every needed Bro script + on the command-line. Bro keeps track of which scripts have been + loaded, so it is not an error to load a script more than once (once + a script has been loaded, any subsequent "@load" directives + for that script are ignored). + + +.. bro:keyword:: @load-sigs + + This works similarly to "@load", except that in this case the filename + represents a signature file (not a Bro script). If the signature filename + ends with ".sig", then you don't need to specify the file extension + in the "@load-sigs" directive. The filename cannot contain any + whitespace. + + In this example, Bro will try to load a signature file + "base/protocols/ssl/dpd.sig":: + + @load-sigs base/protocols/ssl/dpd + + The format for a signature file is explained in the documentation for the + `Signature Framework <../frameworks/signatures.html>`_. + + +.. bro:keyword:: @unload + + This specifies a Bro script that we don't want to load (so a subsequent + attempt to load the specified script will be skipped). However, + if the specified script has already been loaded, then this directive + has no affect. 
+ + In the following example, if the "policy/misc/capture-loss.bro" script + has not been loaded yet, then Bro will not load it:: + + @unload policy/misc/capture-loss + + +.. bro:keyword:: @prefixes + + Specifies a filename prefix to use when looking for script files + to load automatically. The prefix cannot contain any whitespace. + + In the following example, the prefix "cluster" is used and all prefixes + that were previously specified are not used:: + + @prefixes = cluster + + In the following example, the prefix "cluster-manager" is used in + addition to any previously-specified prefixes:: + + @prefixes += cluster-manager + + The way this works is that after Bro parses all script files, then for each + loaded script Bro will take the absolute path of the script and then + it removes the portion of the directory path that is in Bro's file + search path. Then it replaces each "/" character with a period "." + and then prepends the prefix (specified in the "@prefixes" directive) + followed by a period. The resulting filename is searched for in each + directory in Bro's file search path. If a matching file is found, then + the file is automatically loaded. + + For example, if a script called "local.bro" has been loaded, and a prefix + of "test" was specified, then Bro will look for a file named + "test.local.bro" in each directory of Bro's file search path. + + An alternative way to specify prefixes is to use the "-p" Bro + command-line option. + +.. bro:keyword:: @if + + The specified expression must evaluate to type :bro:type:`bool`. If the + value is true, then the following script lines (up to the next "@else" + or "@endif") are available to be executed. + + Example:: + + @if ( ver == 2 ) + print "version 2 detected"; + @endif + +.. bro:keyword:: @ifdef + + This works like "@if", except that the result is true if the specified + identifier is defined. + + Example:: + + @ifdef ( pi ) + print "pi is defined"; + @endif + +.. 
bro:keyword:: @ifndef + + This works exactly like "@ifdef", except that the result is true if the + specified identifier is not defined. + + Example:: + + @ifndef ( pi ) + print "pi is not defined"; + @endif + +.. bro:keyword:: @else + + This directive is optional after an "@if", "@ifdef", or + "@ifndef". If present, it provides an else clause. + + Example:: + + @ifdef ( pi ) + print "pi is defined"; + @else + print "pi is not defined"; + @endif + +.. bro:keyword:: @endif + + This directive is required to terminate each "@if", "@ifdef", or + "@ifndef". + diff --git a/doc/script-reference/index.rst b/doc/script-reference/index.rst index bd600e4a97..932b79a78c 100644 --- a/doc/script-reference/index.rst +++ b/doc/script-reference/index.rst @@ -5,10 +5,17 @@ Script Reference .. toctree:: :maxdepth: 1 + operators + types + attributes + statements + directives + log-files notices proto-analyzers file-analyzers - builtins packages scripts Broxygen Example Script + + diff --git a/doc/script-reference/log-files.rst b/doc/script-reference/log-files.rst new file mode 100644 index 0000000000..208a692443 --- /dev/null +++ b/doc/script-reference/log-files.rst @@ -0,0 +1,148 @@ +========= +Log Files +========= + +Listed below are the log files generated by Bro, including a brief description +of the log file and links to descriptions of the fields for each log +type. 
+ +Network Protocols +----------------- + ++----------------------------+---------------------------------------+---------------------------------+ +| Log File | Description | Field Descriptions | ++============================+=======================================+=================================+ +| conn.log | TCP/UDP/ICMP connections | :bro:type:`Conn::Info` | ++----------------------------+---------------------------------------+---------------------------------+ +| dhcp.log | DHCP leases | :bro:type:`DHCP::Info` | ++----------------------------+---------------------------------------+---------------------------------+ +| dnp3.log | DNP3 requests and replies | :bro:type:`DNP3::Info` | ++----------------------------+---------------------------------------+---------------------------------+ +| dns.log | DNS activity | :bro:type:`DNS::Info` | ++----------------------------+---------------------------------------+---------------------------------+ +| ftp.log | FTP activity | :bro:type:`FTP::Info` | ++----------------------------+---------------------------------------+---------------------------------+ +| http.log | HTTP requests and replies | :bro:type:`HTTP::Info` | ++----------------------------+---------------------------------------+---------------------------------+ +| irc.log | IRC commands and responses | :bro:type:`IRC::Info` | ++----------------------------+---------------------------------------+---------------------------------+ +| modbus.log | Modbus commands and responses | :bro:type:`Modbus::Info` | ++----------------------------+---------------------------------------+---------------------------------+ +| modbus_register_change.log | Tracks changes to Modbus holding | :bro:type:`Modbus::MemmapInfo` | +| | registers | | ++----------------------------+---------------------------------------+---------------------------------+ +| radius.log | RADIUS authentication attempts | :bro:type:`RADIUS::Info` | 
++----------------------------+---------------------------------------+---------------------------------+ +| smtp.log | SMTP transactions | :bro:type:`SMTP::Info` | ++----------------------------+---------------------------------------+---------------------------------+ +| snmp.log | SNMP messages | :bro:type:`SNMP::Info` | ++----------------------------+---------------------------------------+---------------------------------+ +| socks.log | SOCKS proxy requests | :bro:type:`SOCKS::Info` | ++----------------------------+---------------------------------------+---------------------------------+ +| ssh.log | SSH connections | :bro:type:`SSH::Info` | ++----------------------------+---------------------------------------+---------------------------------+ +| ssl.log | SSL/TLS handshake info | :bro:type:`SSL::Info` | ++----------------------------+---------------------------------------+---------------------------------+ +| syslog.log | Syslog messages | :bro:type:`Syslog::Info` | ++----------------------------+---------------------------------------+---------------------------------+ +| tunnel.log | Tunneling protocol events | :bro:type:`Tunnel::Info` | ++----------------------------+---------------------------------------+---------------------------------+ + +Files +----- + ++----------------------------+---------------------------------------+---------------------------------+ +| Log File | Description | Field Descriptions | ++============================+=======================================+=================================+ +| files.log | File analysis results | :bro:type:`Files::Info` | ++----------------------------+---------------------------------------+---------------------------------+ +| x509.log | X.509 certificate info | :bro:type:`X509::Info` | ++----------------------------+---------------------------------------+---------------------------------+ + +Detection +--------- + 
++----------------------------+---------------------------------------+---------------------------------+ +| Log File | Description | Field Descriptions | ++============================+=======================================+=================================+ +| intel.log | Intelligence data matches | :bro:type:`Intel::Info` | ++----------------------------+---------------------------------------+---------------------------------+ +| notice.log | Bro notices | :bro:type:`Notice::Info` | ++----------------------------+---------------------------------------+---------------------------------+ +| notice_alarm.log | The alarm stream | :bro:enum:`Notice::ACTION_ALARM`| ++----------------------------+---------------------------------------+---------------------------------+ +| signatures.log | Signature matches | :bro:type:`Signatures::Info` | ++----------------------------+---------------------------------------+---------------------------------+ +| traceroute.log | Traceroute detection | :bro:type:`Traceroute::Info` | ++----------------------------+---------------------------------------+---------------------------------+ + + +Network Observations +-------------------- + ++----------------------------+---------------------------------------+---------------------------------+ +| Log File | Description | Field Descriptions | ++============================+=======================================+=================================+ +| app_stats.log | Web app usage statistics | :bro:type:`AppStats::Info` | ++----------------------------+---------------------------------------+---------------------------------+ +| known_certs.log | SSL certificates | :bro:type:`Known::CertsInfo` | ++----------------------------+---------------------------------------+---------------------------------+ +| known_devices.log | MAC addresses of devices on the | :bro:type:`Known::DevicesInfo` | +| | network | | 
++----------------------------+---------------------------------------+---------------------------------+ +| known_hosts.log | Hosts that have completed TCP | :bro:type:`Known::HostsInfo` | +| | handshakes | | ++----------------------------+---------------------------------------+---------------------------------+ +| known_modbus.log | Modbus masters and slaves | :bro:type:`Known::ModbusInfo` | ++----------------------------+---------------------------------------+---------------------------------+ +| known_services.log | Services running on hosts | :bro:type:`Known::ServicesInfo` | ++----------------------------+---------------------------------------+---------------------------------+ +| software.log | Software being used on the network | :bro:type:`Software::Info` | ++----------------------------+---------------------------------------+---------------------------------+ + +Miscellaneous +------------- + ++----------------------------+---------------------------------------+---------------------------------+ +| Log File | Description | Field Descriptions | ++============================+=======================================+=================================+ +| barnyard2.log | Alerts received from Barnyard2 | :bro:type:`Barnyard2::Info` | ++----------------------------+---------------------------------------+---------------------------------+ +| dpd.log | Dynamic protocol detection failures | :bro:type:`DPD::Info` | ++----------------------------+---------------------------------------+---------------------------------+ +| unified2.log | Interprets Snort's unified output | :bro:type:`Unified2::Info` | ++----------------------------+---------------------------------------+---------------------------------+ +| weird.log | Unexpected network-level activity | :bro:type:`Weird::Info` | ++----------------------------+---------------------------------------+---------------------------------+ + +Bro Diagnostics +--------------- + 
++----------------------------+---------------------------------------+---------------------------------+ +| Log File | Description | Field Descriptions | ++============================+=======================================+=================================+ +| capture_loss.log | Packet loss rate | :bro:type:`CaptureLoss::Info` | ++----------------------------+---------------------------------------+---------------------------------+ +| cluster.log | Bro cluster messages | :bro:type:`Cluster::Info` | ++----------------------------+---------------------------------------+---------------------------------+ +| communication.log | Communication events between Bro or | :bro:type:`Communication::Info` | +| | Broccoli instances | | ++----------------------------+---------------------------------------+---------------------------------+ +| loaded_scripts.log | Shows all scripts loaded by Bro | :bro:type:`LoadedScripts::Info` | ++----------------------------+---------------------------------------+---------------------------------+ +| packet_filter.log | List packet filters that were applied | :bro:type:`PacketFilter::Info` | ++----------------------------+---------------------------------------+---------------------------------+ +| prof.log | Profiling statistics (to create this | N/A | +| | log, load policy/misc/profiling.bro) | | ++----------------------------+---------------------------------------+---------------------------------+ +| reporter.log | Internal error/warning/info messages | :bro:type:`Reporter::Info` | ++----------------------------+---------------------------------------+---------------------------------+ +| stats.log | Memory/event/packet/lag statistics | :bro:type:`Stats::Info` | ++----------------------------+---------------------------------------+---------------------------------+ +| stderr.log | Captures standard error when Bro is | N/A | +| | started from BroControl | | 
++----------------------------+---------------------------------------+---------------------------------+ +| stdout.log | Captures standard output when Bro is | N/A | +| | started from BroControl | | ++----------------------------+---------------------------------------+---------------------------------+ + diff --git a/doc/script-reference/operators.rst b/doc/script-reference/operators.rst new file mode 100644 index 0000000000..9442102b52 --- /dev/null +++ b/doc/script-reference/operators.rst @@ -0,0 +1,191 @@ +Operators +========= + +The Bro scripting language supports the following operators. Note that +each data type only supports a subset of these operators. For more +details, see the documentation about the `data types `_. + +Relational operators +-------------------- + +The relational operators evaluate to type :bro:type:`bool`. + ++------------------------------+--------------+ +| Name | Syntax | ++==============================+==============+ +| Equality | *a* == *b* | ++------------------------------+--------------+ +| Inequality | *a* != *b* | ++------------------------------+--------------+ +| Less than | *a* < *b* | ++------------------------------+--------------+ +| Less than or equal | *a* <= *b* | ++------------------------------+--------------+ +| Greater than | *a* > *b* | ++------------------------------+--------------+ +| Greater than or equal | *a* >= *b* | ++------------------------------+--------------+ + + +Logical operators +----------------- + +The logical operators require operands of type :bro:type:`bool`, and +evaluate to type :bro:type:`bool`. + ++------------------------------+--------------+ +| Name | Syntax | ++==============================+==============+ +| Logical AND | *a* && *b* | ++------------------------------+--------------+ +| Logical OR | *a* \|\| *b* | ++------------------------------+--------------+ +| Logical NOT | ! 
*a* | ++------------------------------+--------------+ + + +Arithmetic operators +-------------------- + ++------------------------------+-------------+-------------------------------+ +| Name | Syntax | Notes | ++==============================+=============+===============================+ +| Addition | *a* + *b* | For :bro:type:`string` | +| | | operands, this performs | +| | | string concatenation. | ++------------------------------+-------------+-------------------------------+ +| Subtraction | *a* - *b* | | ++------------------------------+-------------+-------------------------------+ +| Multiplication | *a* \* *b* | | ++------------------------------+-------------+-------------------------------+ +| Division | *a* / *b* | For :bro:type:`int` or | +| | | :bro:type:`count` operands, | +| | | the fractional part of the | +| | | result is dropped. | ++------------------------------+-------------+-------------------------------+ +| Modulo | *a* % *b* | Operand types cannot be | +| | | "double". | ++------------------------------+-------------+-------------------------------+ +| Unary plus | \+ *a* | | ++------------------------------+-------------+-------------------------------+ +| Unary minus | \- *a* | | ++------------------------------+-------------+-------------------------------+ +| Pre-increment | ++ *a* | Operand type cannot be | +| | | "double". | ++------------------------------+-------------+-------------------------------+ +| Pre-decrement | ``--`` *a* | Operand type cannot be | +| | | "double". | ++------------------------------+-------------+-------------------------------+ +| Absolute value | \| *a* \| | If operand is | +| | | :bro:type:`string`, | +| | | :bro:type:`set`, | +| | | :bro:type:`table`, or | +| | | :bro:type:`vector`, this | +| | | evaluates to number | +| | | of elements. 
| ++------------------------------+-------------+-------------------------------+ + + +Assignment operators +-------------------- + +The assignment operators evaluate to the result of the assignment. + ++------------------------------+-------------+ +| Name | Syntax | ++==============================+=============+ +| Assignment | *a* = *b* | ++------------------------------+-------------+ +| Addition assignment | *a* += *b* | ++------------------------------+-------------+ +| Subtraction assignment | *a* -= *b* | ++------------------------------+-------------+ + + +Record field operators +---------------------- + +The record field operators take a :bro:type:`record` as the first operand, +and a field name as the second operand. For both operators, the specified +field name must be in the declaration of the record type. + ++------------------------------+-------------+-------------------------------+ +| Name | Syntax | Notes | ++==============================+=============+===============================+ +| Field access | *a* $ *b* | | ++------------------------------+-------------+-------------------------------+ +| Field value existence test | *a* ?$ *b* | Evaluates to type | +| | | :bro:type:`bool`. | +| | | True if the specified field | +| | | has been assigned a value, or | +| | | false if not. | ++------------------------------+-------------+-------------------------------+ + + +Other operators +--------------- + ++--------------------------------+-------------------+------------------------+ +| Name | Syntax | Notes | ++================================+===================+========================+ +| Membership test | *a* in *b* |Evaluates to type | +| | |:bro:type:`bool`. Do not| +| | |confuse this use of "in"| +| | |with that used in a | +| | |:bro:keyword:`for` | +| | |statement. | ++--------------------------------+-------------------+------------------------+ +| Non-membership test | *a* !in *b* |This is the logical NOT | +| | |of the "in" operator. 
| +| | |For example: "a !in b" | +| | |is equivalent to | +| | |"!(a in b)". | ++--------------------------------+-------------------+------------------------+ +| Table or vector element access | *a* [ *b* ] |This operator can also | +| | |be used with a | +| | |:bro:type:`set`, but | +| | |only with the | +| | |:bro:keyword:`add` or | +| | |:bro:keyword:`delete` | +| | |statement. | ++--------------------------------+-------------------+------------------------+ +| Substring extraction | *a* [ *b* : *c* ] |See the | +| | |:bro:type:`string` type | +| | |for more details. | ++--------------------------------+-------------------+------------------------+ +| Create a deep copy | copy ( *a* ) |This is relevant only | +| | |for data types that are | +| | |assigned by reference, | +| | |such as | +| | |:bro:type:`vector`, | +| | |:bro:type:`set`, | +| | |:bro:type:`table`, | +| | |and :bro:type:`record`. | ++--------------------------------+-------------------+------------------------+ +| Module namespace access | *a* \:\: *b* |The first operand is the| +| | |module name, and the | +| | |second operand is an | +| | |identifier that refers | +| | |to a global variable, | +| | |enumeration constant, or| +| | |user-defined type that | +| | |was exported from the | +| | |module. | ++--------------------------------+-------------------+------------------------+ +| Conditional | *a* ? *b* : *c* |The first operand must | +| | |evaluate to type | +| | |:bro:type:`bool`. | +| | |If true, then the | +| | |second expression is | +| | |evaluated and is the | +| | |result of the entire | +| | |expression. Otherwise, | +| | |the third expression is | +| | |evaluated and is the | +| | |result of the entire | +| | |expression. The types of| +| | |the second and third | +| | |operands must be | +| | |compatible. 
| ++--------------------------------+-------------------+------------------------+ + diff --git a/doc/script-reference/statements.rst b/doc/script-reference/statements.rst new file mode 100644 index 0000000000..064310ca45 --- /dev/null +++ b/doc/script-reference/statements.rst @@ -0,0 +1,602 @@ +Declarations and Statements +=========================== + +The Bro scripting language supports the following declarations and +statements. + + +Declarations +~~~~~~~~~~~~ + ++----------------------------+-----------------------------+ +| Name | Description | ++============================+=============================+ +| :bro:keyword:`module` | Change the current module | ++----------------------------+-----------------------------+ +| :bro:keyword:`export` | Export identifiers from the | +| | current module | ++----------------------------+-----------------------------+ +| :bro:keyword:`global` | Declare a global variable | ++----------------------------+-----------------------------+ +| :bro:keyword:`const` | Declare a constant | ++----------------------------+-----------------------------+ +| :bro:keyword:`type` | Declare a user-defined type | ++----------------------------+-----------------------------+ +| :bro:keyword:`redef` | Redefine a global value or | +| | extend a user-defined type | ++----------------------------+-----------------------------+ +| `function/event/hook`_ | Declare a function, event | +| | handler, or hook | ++----------------------------+-----------------------------+ + +Statements +~~~~~~~~~~ + ++----------------------------+------------------------+ +| Name | Description | ++============================+========================+ +| :bro:keyword:`local` | Declare a local | +| | variable | ++----------------------------+------------------------+ +| :bro:keyword:`add`, | Add or delete | +| :bro:keyword:`delete` | elements | ++----------------------------+------------------------+ +| :bro:keyword:`print` | Print to stdout or a | +| | file | 
++----------------------------+------------------------+ +| :bro:keyword:`for`, | Loop over each | +| :bro:keyword:`next`, | element in a container | +| :bro:keyword:`break` | object | ++----------------------------+------------------------+ +| :bro:keyword:`if` | Evaluate boolean | +| | expression and if true,| +| | execute a statement | ++----------------------------+------------------------+ +| :bro:keyword:`switch`, | Evaluate expression | +| :bro:keyword:`break`, | and execute statement | +| :bro:keyword:`fallthrough` | with a matching value | ++----------------------------+------------------------+ +| :bro:keyword:`when` | Asynchronous execution | ++----------------------------+------------------------+ +| :bro:keyword:`event`, | Invoke or schedule | +| :bro:keyword:`schedule` | an event handler | ++----------------------------+------------------------+ +| :bro:keyword:`return` | Return from function, | +| | hook, or event handler | ++----------------------------+------------------------+ + +Declarations +------------ + +The following global declarations cannot occur within a function, hook, or +event handler. Also, these declarations cannot appear after any statements +that are outside of a function, hook, or event handler. + +.. bro:keyword:: module + + The "module" keyword is used to change the current module. This + affects the scope of any subsequently declared global identifiers. + + Example:: + + module mymodule; + + If a global identifier is declared after a "module" declaration, + then its scope ends at the end of the current Bro script or at the + next "module" declaration, whichever comes first. However, if a + global identifier is declared after a "module" declaration, but inside + an :bro:keyword:`export` block, then its scope ends at the end of the + last loaded Bro script, but it must be referenced using the namespace + operator (``::``) in other modules. + + There can be any number of "module" declarations in a Bro script. 
+ The same "module" declaration can appear in any number of different + Bro scripts. + + +.. bro:keyword:: export + + An "export" block contains one or more declarations + (no statements are allowed in an "export" block) that the current + module is exporting. This enables these global identifiers to be visible + in other modules (but not prior to their declaration) via the namespace + operator (``::``). See the :bro:keyword:`module` keyword for a more + detailed explanation. + + Example:: + + export { + redef enum Log::ID += { LOG }; + + type Info: record { + ts: time &log; + uid: string &log; + }; + + const conntime = 30sec &redef; + } + + Note that the braces in an "export" block are always required + (they do not indicate a compound statement). Also, no semicolon is + needed to terminate an "export" block. + +.. bro:keyword:: global + + Variables declared with the "global" keyword will be global. + If a type is not specified, then an initializer is required so that + the type can be inferred. Likewise, if an initializer is not supplied, + then the type must be specified. Example:: + + global pi = 3.14; + global hosts: set[addr]; + global ciphers: table[string] of string = table(); + + Variable declarations outside of any function, hook, or event handler are + required to use this keyword (unless they are declared with the + :bro:keyword:`const` keyword). Definitions of functions, hooks, and + event handlers are not allowed to use the "global" + keyword (they already have global scope), except function declarations + where no function body is supplied use the "global" keyword. + + The scope of a global variable begins where the declaration is located, + and extends through all remaining Bro scripts that are loaded (however, + see the :bro:keyword:`module` keyword for an explanation of how modules + change the visibility of global identifiers). + + +.. bro:keyword:: const + + A variable declared with the "const" keyword will be constant. 
+ Variables declared as constant are required to be initialized at the + time of declaration. Example:: + + const pi = 3.14; + const ssh_port: port = 22/tcp; + + The value of a constant cannot be changed later (the only + exception is if the variable is global and has the :bro:attr:`&redef` + attribute, then its value can be changed only with a :bro:keyword:`redef`). + + The scope of a constant is local if the declaration is in a + function, hook, or event handler, and global otherwise. + Note that the "const" keyword cannot be used with either the "local" + or "global" keywords (i.e., "const" replaces "local" and "global"). + + +.. bro:keyword:: type + + The "type" keyword is used to declare a user-defined type. The name + of this new type has global scope and can be used anywhere a built-in + type name can occur. + + The "type" keyword is most commonly used when defining a + :bro:type:`record` or an :bro:type:`enum`, but is also useful when + dealing with more complex types. + + Example:: + + type mytype: table[count] of table[addr, port] of string; + global myvar: mytype; + +.. bro:keyword:: redef + + There are three ways that "redef" can be used: to change the value of + a global variable, to extend a record type or enum type, or to specify + a new event handler body that replaces all those that were previously + defined. + + If you're using "redef" to change a global variable (defined using either + :bro:keyword:`const` or :bro:keyword:`global`), then the variable that you + want to change must have the :bro:attr:`&redef` attribute. If the variable + you're changing is a table, set, or pattern, you can use ``+=`` to add + new elements, or you can use ``=`` to specify a new value (all previous + contents of the object are removed). If the variable you're changing is a + set or table, then you can use the ``-=`` operator to remove the + specified elements (nothing happens for specified elements that don't + exist). 
If the variable you are changing is not a table, set, or pattern, + then you must use the ``=`` operator. + + Examples:: + + redef pi = 3.14; + + If you're using "redef" to extend a record or enum, then you must + use the ``+=`` assignment operator. + For an enum, you can add more enumeration constants, and for a record + you can add more record fields (however, each record field in the "redef" + must have either the :bro:attr:`&optional` or :bro:attr:`&default` + attribute). + + Examples:: + + redef enum color += { Blue, Red }; + redef record MyRecord += { n2:int &optional; s2:string &optional; }; + + If you're using "redef" to specify a new event handler body that + replaces all those that were previously defined (i.e., any subsequently + defined event handler body will not be affected by this "redef"), then + the syntax is the same as a regular event handler definition except for + the presence of the "redef" keyword. + + Example:: + + redef event myevent(s:string) { print "Redefined", s; } + + +.. _function/event/hook: + +**function/event/hook** + For details on how to declare a :bro:type:`function`, + :bro:type:`event` handler, or :bro:type:`hook`, + see the documentation for those types. + + +Statements +---------- + +Each statement in a Bro script must be terminated with a semicolon (with a +few exceptions noted below). An individual statement can span multiple +lines. + +All statements (except those contained within a function, hook, or event +handler) must appear after all global declarations. + +Here are the statements that the Bro scripting language supports. + +.. bro:keyword:: add + + The "add" statement is used to add an element to a :bro:type:`set`. + Nothing happens if the specified element already exists in the set. + + Example:: + + local myset: set[string]; + add myset["test"]; + +.. bro:keyword:: break + + The "break" statement is used to break out of a :bro:keyword:`switch` or + :bro:keyword:`for` statement. + + +.. 
bro:keyword:: delete + + The "delete" statement is used to remove an element from a + :bro:type:`set` or :bro:type:`table`. Nothing happens if the + specified element does not exist in the set or table. + + Example:: + + local myset = set("this", "test"); + local mytable = table(["key1"] = 80/tcp, ["key2"] = 53/udp); + delete myset["test"]; + delete mytable["key1"]; + +.. bro:keyword:: event + + The "event" statement immediately queues invocation of an event handler. + + Example:: + + event myevent("test", 5); + +.. bro:keyword:: fallthrough + + The "fallthrough" statement can be used as the last statement in a + "case" block to indicate that execution should continue into the + next "case" or "default" label. + + For an example, see the :bro:keyword:`switch` statement. + +.. bro:keyword:: for + + A "for" loop iterates over each element in a string, set, vector, or + table and executes a statement for each iteration. + + For each iteration of the loop, a loop variable will be assigned to an + element if the expression evaluates to a string or set, or an index if + the expression evaluates to a vector or table. Then the statement + is executed. However, the statement will not be executed if the expression + evaluates to an object with no elements. + + If the expression is a table or a set with more than one index, then the + loop variable must be specified as a comma-separated list of different + loop variables (one for each index), enclosed in brackets. + + A :bro:keyword:`break` statement can be used at any time to immediately + terminate the "for" loop, and a :bro:keyword:`next` statement can be + used to skip to the next loop iteration. + + Note that the loop variable in a "for" statement is not allowed to be + a global variable, and it does not need to be declared prior to the "for" + statement. The type will be inferred from the elements of the + expression. 
+ + Example:: + + local myset = set(80/tcp, 81/tcp); + local mytable = table([10.0.0.1, 80/tcp]="s1", [10.0.0.2, 81/tcp]="s2"); + + for (p in myset) + print p; + + for ([i,j] in mytable) { + if (mytable[i,j] == "done") + break; + if (mytable[i,j] == "skip") + next; + print i,j; + } + + +.. bro:keyword:: if + + Evaluates a given expression, which must yield a :bro:type:`bool` value. + If true, then a specified statement is executed. If false, then + the statement is not executed. Example:: + + if ( x == 2 ) print "x is 2"; + + + However, if the expression evaluates to false and if an "else" is + provided, then the statement following the "else" is executed. Example:: + + if ( x == 2 ) + print "x is 2"; + else + print "x is not 2"; + +.. bro:keyword:: local + + A variable declared with the "local" keyword will be local. If a type + is not specified, then an initializer is required so that the type can + be inferred. Likewise, if an initializer is not supplied, then the + type must be specified. + + Examples:: + + local x1 = 5.7; + local x2: double; + local x3: double = 5.7; + + Variable declarations inside a function, hook, or event handler are + required to use this keyword (the only two exceptions are variables + declared with :bro:keyword:`const`, and variables implicitly declared in a + :bro:keyword:`for` statement). + + The scope of a local variable starts at the location where it is declared + and persists to the end of the function, hook, + or event handler in which it is declared (this is true even if the + local variable was declared within a `compound statement`_ or is the loop + variable in a "for" statement). + + +.. bro:keyword:: next + + The "next" statement can only appear within a :bro:keyword:`for` loop. + It causes execution to skip to the next iteration. + + For an example, see the :bro:keyword:`for` statement. + +.. bro:keyword:: print + + The "print" statement takes a comma-separated list of one or more + expressions. 
Each expression in the list is evaluated and then converted + to a string. Then each string is printed, with each string separated by + a comma in the output. + + Examples:: + + print 3.14; + print "Results", x, y; + + By default, the "print" statement writes to the standard + output (stdout). However, if the first expression is of type + :bro:type:`file`, then "print" writes to that file. + + If a string contains non-printable characters (i.e., byte values that are + not in the range 32 - 126), then the "print" statement converts each + non-printable character to an escape sequence before it is printed. + + For more control over how the strings are formatted, see the :bro:id:`fmt` + function. + +.. bro:keyword:: return + + The "return" statement immediately exits the current function, hook, or + event handler. For a function, the specified expression (if any) is + evaluated and returned. A "return" statement in a hook or event handler + cannot return a value because event handlers and hooks do not have + return types. + + Examples:: + + function my_func(): string + { + return "done"; + } + + event my_event(n: count) + { + if ( n == 0 ) return; + + print n; + } + + There is a special form of the "return" statement that is only allowed + in functions. Syntactically, it looks like a :bro:keyword:`when` statement + immediately preceded by the "return" keyword. This form of the "return" + statement is used to specify a function that delays its result (such a + function can only be called in the expression of a :bro:keyword:`when` + statement). The function returns at the time the "when" + statement's condition becomes true, and the function returns the value + that the "when" statement's body returns (or if the condition does + not become true within the specified timeout interval, then the function + returns the value that the "timeout" block returns). 
+ + Example:: + + global X: table[string] of count; + + function a() : count + { + # This delays until condition becomes true. + return when ( "a" in X ) + { + return X["a"]; + } + timeout 30 sec + { + return 0; + } + } + + event bro_init() + { + # Installs a trigger which fires if a() returns 42. + when ( a() == 42 ) + print "expected result"; + + print "Waiting for a() to return..."; + X["a"] = 42; + } + + +.. bro:keyword:: schedule + + The "schedule" statement is used to raise a specified event with + specified parameters at a later time specified as an :bro:type:`interval`. + + Example:: + + schedule 30sec { myevent(x, y, z) }; + + Note that the braces are always required (they do not indicate a + `compound statement`_). + + Note that "schedule" is actually an expression that returns a value + of type "timer", but in practice the return value is not used. + +.. bro:keyword:: switch + + A "switch" statement evaluates a given expression and jumps to + the first "case" label which contains a matching value (the result of the + expression must be type-compatible with all of the values in all of the + "case" labels). If there is no matching value, then execution jumps to + the "default" label instead, and if there is no "default" label then + execution jumps out of the "switch" block. + + Here is an example (assuming that "get_day_of_week" is a + function that returns a string):: + + switch get_day_of_week() + { + case "Sa", "Su": + print "weekend"; + fallthrough; + case "Mo", "Tu", "We", "Th", "Fr": + print "valid result"; + break; + default: + print "invalid result"; + break; + } + + A "switch" block can have any number of "case" labels, and one + optional "default" label. + + A "case" label can have a comma-separated list of + more than one value. A value in a "case" label can be an expression, + but it must be a constant expression (i.e., the expression can consist + only of constants). 
+ + Each "case" and the "default" block must + end with either a :bro:keyword:`break`, :bro:keyword:`fallthrough`, or + :bro:keyword:`return` statement (although "return" is allowed only + if the "switch" statement is inside a function, hook, or event handler). + If a "case" (or "default") block contains more than one statement, then + there is no need to wrap them in braces. + + Note that the braces in a "switch" statement are always required (these + do not indicate the presence of a `compound statement`_), and that no + semicolon is needed at the end of a "switch" statement. + + +.. bro:keyword:: when + + Evaluates a given expression, which must result in a value of type + :bro:type:`bool`. When the value of the expression becomes available + and if the result is true, then a specified statement is executed. + + In the following example, if the expression evaluates to true, then + the "print" statement is executed:: + + when ( (local x = foo()) && x == 42 ) + print x; + + However, if a timeout is specified, and if the expression does not + evaluate to true within the specified timeout interval, then the + statement following the "timeout" keyword is executed:: + + when ( (local x = foo()) && x == 42 ) + print x; + timeout 5sec { + print "timeout"; + } + + Note that when a timeout is specified the braces are + always required (these do not indicate a `compound statement`_). + + The expression in a "when" statement can contain a declaration of a local + variable but only if the declaration is written in the form + "local *var* = *init*" (example: "local x = myfunction()"). This form + of a local declaration is actually an expression, the result of which + is always a boolean true value. + + The expression in a "when" statement can contain an asynchronous function + call such as :bro:id:`lookup_hostname` (in fact, this is the only place + such a function can be called), but it can also contain an ordinary + function call. 
When an asynchronous function call is in the expression, + then Bro will continue processing statements in the script following + the "when" statement, and when the result of the function call is available + Bro will finish evaluating the expression in the "when" statement. + See the :bro:keyword:`return` statement for an explanation of how to + create an asynchronous function in a Bro script. + + +.. _compound statement: + +**compound statement** + A compound statement is created by wrapping zero or more statements in + braces ``{ }``. Individual statements inside the braces need to be + terminated by a semicolon, but a semicolon is not needed at the end + (outside of the braces) of a compound statement. + + A compound statement is required in order to execute more than one + statement in the body of a :bro:keyword:`for`, :bro:keyword:`if`, or + :bro:keyword:`when` statement. + + Example:: + + if ( x == 2 ) { + print "x is 2"; + ++x; + } + + Note that there are other places in the Bro scripting language that use + braces, but that do not indicate the presence of a compound + statement (these are noted in the documentation). + +.. _null: + +**null statement** + The null statement (executing it has no effect) consists of just a + semicolon. This might be useful during testing or debugging a Bro script + in places where a statement is required, but it is probably not useful + otherwise. + + Example:: + + if ( x == 2 ) + ; + diff --git a/doc/script-reference/builtins.rst b/doc/script-reference/types.rst similarity index 60% rename from doc/script-reference/builtins.rst rename to doc/script-reference/types.rst index 85e9cd14c8..cc601db75f 100644 --- a/doc/script-reference/builtins.rst +++ b/doc/script-reference/types.rst @@ -1,106 +1,128 @@ -Types and Attributes -==================== - Types ------ +===== -Every value in a Bro script has a type (see below for a list of all built-in -types). 
Although Bro variables have static types (meaning that their type -is fixed), their type is inferred from the value to which they are -initially assigned when the variable is declared without an explicit type -name. +The Bro scripting language supports the following built-in types: -Automatic conversions happen when a binary operator has operands of -different types. Automatic conversions are limited to converting between -numeric types. The numeric types are ``int``, ``count``, and ``double`` -(``bool`` is not a numeric type). -When an automatic conversion occurs, values are promoted to the "highest" -type in the expression. In general, this promotion follows a simple -hierarchy: ``double`` is highest, ``int`` comes next, and ``count`` is -lowest. ++-----------------------+--------------------+ +| Name | Description | ++=======================+====================+ +| :bro:type:`bool` | Boolean | ++-----------------------+--------------------+ +| :bro:type:`count`, | Numeric types | +| :bro:type:`int`, | | +| :bro:type:`double` | | ++-----------------------+--------------------+ +| :bro:type:`time`, | Time types | +| :bro:type:`interval` | | ++-----------------------+--------------------+ +| :bro:type:`string` | String | ++-----------------------+--------------------+ +| :bro:type:`pattern` | Regular expression | ++-----------------------+--------------------+ +| :bro:type:`port`, | Network types | +| :bro:type:`addr`, | | +| :bro:type:`subnet` | | ++-----------------------+--------------------+ +| :bro:type:`enum` | Enumeration | +| | (user-defined type)| ++-----------------------+--------------------+ +| :bro:type:`table`, | Container types | +| :bro:type:`set`, | | +| :bro:type:`vector`, | | +| :bro:type:`record` | | ++-----------------------+--------------------+ +| :bro:type:`function`, | Executable types | +| :bro:type:`event`, | | +| :bro:type:`hook` | | ++-----------------------+--------------------+ +| :bro:type:`file` | File type (only | +| | for 
writing) | ++-----------------------+--------------------+ +| :bro:type:`opaque` | Opaque type (for | +| | some built-in | +| | functions) | ++-----------------------+--------------------+ +| :bro:type:`any` | Any type (for | +| | functions or | +| | containers) | ++-----------------------+--------------------+ -The Bro scripting language supports the following built-in types. - -.. bro:type:: void - - An internal Bro type (i.e., "void" is not a reserved keyword in the Bro - scripting language) representing the absence of a return type for a - function. +Here is a more detailed description of each type: .. bro:type:: bool Reflects a value with one of two meanings: true or false. The two - ``bool`` constants are ``T`` and ``F``. + "bool" constants are ``T`` and ``F``. - The ``bool`` type supports the following operators: equality/inequality + The "bool" type supports the following operators: equality/inequality (``==``, ``!=``), logical and/or (``&&``, ``||``), logical - negation (``!``), and absolute value (where ``|T|`` is 1, and ``|F|`` is 0). + negation (``!``), and absolute value (where ``|T|`` is 1, and ``|F|`` is 0, + and in both cases the result type is :bro:type:`count`). .. bro:type:: int - A numeric type representing a 64-bit signed integer. An ``int`` constant - is a string of digits preceded by a ``+`` or ``-`` sign, e.g. + A numeric type representing a 64-bit signed integer. An "int" constant + is a string of digits preceded by a "+" or "-" sign, e.g. ``-42`` or ``+5`` (the "+" sign is optional but see note about type - inferencing below). An ``int`` constant can also be written in + inferencing below). An "int" constant can also be written in hexadecimal notation (in which case "0x" must be between the sign and the hex digits), e.g. ``-0xFF`` or ``+0xabc123``. 
- The ``int`` type supports the following operators: arithmetic + The "int" type supports the following operators: arithmetic operators (``+``, ``-``, ``*``, ``/``, ``%``), comparison operators (``==``, ``!=``, ``<``, ``<=``, ``>``, ``>=``), assignment operators (``=``, ``+=``, ``-=``), pre-increment (``++``), pre-decrement - (``--``), and absolute value (e.g., ``|-3|`` is 3). + (``--``), unary plus and minus (``+``, ``-``), and absolute value + (e.g., ``|-3|`` is 3, but the result type is :bro:type:`count`). When using type inferencing use care so that the - intended type is inferred, e.g. ``local size_difference = 0`` will - infer :bro:type:`count`, while ``local size_difference = +0`` - will infer :bro:type:`int`. + intended type is inferred, e.g. "local size_difference = 0" will + infer ":bro:type:`count`", while "local size_difference = +0" + will infer "int". .. bro:type:: count - A numeric type representing a 64-bit unsigned integer. A ``count`` - constant is a string of digits, e.g. ``1234`` or ``0``. A ``count`` + A numeric type representing a 64-bit unsigned integer. A "count" + constant is a string of digits, e.g. ``1234`` or ``0``. A "count" can also be written in hexadecimal notation (in which case "0x" must precede the hex digits), e.g. ``0xff`` or ``0xABC123``. - The ``count`` type supports the same operators as the :bro:type:`int` - type. A unary plus or minus applied to a ``count`` results in an ``int``. - -.. bro:type:: counter - - An alias to :bro:type:`count`. + The "count" type supports the same operators as the ":bro:type:`int`" + type, but a unary plus or minus applied to a "count" results in an + "int". .. bro:type:: double A numeric type representing a double-precision floating-point number. Floating-point constants are written as a string of digits with an optional decimal point, optional scale-factor in scientific - notation, and optional ``+`` or ``-`` sign. Examples are ``-1234``, + notation, and optional "+" or "-" sign. 
Examples are ``-1234``, ``-1234e0``, ``3.14159``, and ``.003E-23``. - The ``double`` type supports the following operators: arithmetic + The "double" type supports the following operators: arithmetic operators (``+``, ``-``, ``*``, ``/``), comparison operators (``==``, ``!=``, ``<``, ``<=``, ``>``, ``>=``), assignment operators - (``=``, ``+=``, ``-=``), and absolute value (e.g., ``|-3.14|`` is 3.14). + (``=``, ``+=``, ``-=``), unary plus and minus (``+``, ``-``), and + absolute value (e.g., ``|-3.14|`` is 3.14). When using type inferencing use care so that the - intended type is inferred, e.g. ``local size_difference = 5`` will - infer :bro:type:`count`, while ``local size_difference = 5.0`` - will infer :bro:type:`double`. + intended type is inferred, e.g. "local size_difference = 5" will + infer ":bro:type:`count`", while "local size_difference = 5.0" + will infer "double". .. bro:type:: time A temporal type representing an absolute time. There is currently no way to specify a ``time`` constant, but one can use the :bro:id:`double_to_time`, :bro:id:`current_time`, or :bro:id:`network_time` - built-in functions to assign a value to a ``time``-typed variable. + built-in functions to assign a value to a ``time``-typed variable. Time values support the comparison operators (``==``, ``!=``, ``<``, ``<=``, ``>``, ``>=``). A ``time`` value can be subtracted from - another ``time`` value to produce an ``interval`` value. An ``interval`` - value can be added to, or subtracted from, a ``time`` value to produce a - ``time`` value. The absolute value of a ``time`` value is a ``double`` - with the same numeric value. + another ``time`` value to produce an :bro:type:`interval` value. An + ``interval`` value can be added to, or subtracted from, a ``time`` value + to produce a ``time`` value. The absolute value of a ``time`` value is + a :bro:type:`double` with the same numeric value. .. 
bro:type:: interval @@ -115,52 +137,58 @@ The Bro scripting language supports the following built-in types. ``3.5mins``. An ``interval`` can also be negated, for example ``-12 hr`` represents "twelve hours in the past". - Intervals support addition and subtraction. Intervals also support - division (in which case the result is a ``double`` value), the - comparison operators (``==``, ``!=``, ``<``, ``<=``, ``>``, ``>=``), - and the assignment operators (``=``, ``+=``, ``-=``). Also, an - ``interval`` can be multiplied or divided by an arithmetic type - (``count``, ``int``, or ``double``) to produce an ``interval`` value. - The absolute value of an ``interval`` is a ``double`` value equal to the - number of seconds in the ``interval`` (e.g., ``|-1 min|`` is 60). + Intervals support addition and subtraction, the comparison operators + (``==``, ``!=``, ``<``, ``<=``, ``>``, ``>=``), the assignment + operators (``=``, ``+=``, ``-=``), and unary plus and minus (``+``, ``-``). + + Intervals also support division (in which case the result is a + :bro:type:`double` value). An ``interval`` can be multiplied or divided + by an arithmetic type (``count``, ``int``, or ``double``) to produce + an ``interval`` value. The absolute value of an ``interval`` is a + ``double`` value equal to the number of seconds in the ``interval`` + (e.g., ``|-1 min|`` is 60.0). .. bro:type:: string - A type used to hold character-string values which represent text. - String constants are created by enclosing text in double quotes (") - and the backslash character (\\) introduces escape sequences (all of - the C-style escape sequences are supported). + A type used to hold character-string values which represent text, although + strings in a Bro script can actually contain any arbitrary binary data. + + String constants are created by enclosing text within a pair of double + quotes ("). A string constant cannot span multiple lines in a Bro script. 
+ The backslash character (\\) introduces escape sequences. The + following escape sequences are recognized: ``\n``, ``\t``, ``\v``, ``\b``, + ``\r``, ``\f``, ``\a``, ``\ooo`` (where each 'o' is an octal digit), + ``\xhh`` (where each 'h' is a hexadecimal digit). For escape sequences + that don't match any of these, Bro will just remove the backslash (so + to represent a literal backslash in a string constant, you just use + two consecutive backslashes). Strings support concatenation (``+``), and assignment (``=``, ``+=``). Strings also support the comparison operators (``==``, ``!=``, ``<``, ``<=``, ``>``, ``>=``). The number of characters in a string can be found by enclosing the string within pipe characters (e.g., ``|"abc"|`` - is 3). - - The subscript operator can extract an individual character or a substring - of a string (string indexing is zero-based, but an index of - -1 refers to the last character in the string, and -2 refers to the - second-to-last character, etc.). When extracting a substring, the - starting and ending index values are separated by a colon. For example:: - - local orig = "0123456789"; - local third_char = orig[2]; - local last_char = orig[-1]; - local first_three_chars = orig[0:2]; - - Substring searching can be performed using the "in" or "!in" + is 3). Substring searching can be performed using the "in" or "!in" operators (e.g., "bar" in "foobar" yields true). - Note that Bro represents strings internally as a count and vector of - bytes rather than a NUL-terminated byte string (although string - constants are also automatically NUL-terminated). This is because - network traffic can easily introduce NULs into strings either by - nature of an application, inadvertently, or maliciously. And while - NULs are allowed in Bro strings, when present in strings passed as - arguments to many functions, a run-time error can occur as their - presence likely indicates a sort of problem. 
In that case, the - string will also only be represented to the user as the literal - "" string. + The subscript operator can extract a substring of a string. To do this, + specify the starting index to extract (if the starting index is omitted, + then zero is assumed), followed by a colon and index + one past the last character to extract (if the last index is omitted, + then the extracted substring will go to the end of the original string). + However, if both the colon and last index are omitted, then a string of + length one is extracted. String indexing is zero-based, but an index + of -1 refers to the last character in the string, and -2 refers to the + second-to-last character, etc. Here are a few examples:: + + local orig = "0123456789"; + local second_char = orig[1]; + local last_char = orig[-1]; + local first_two_chars = orig[:2]; + local last_two_chars = orig[8:]; + local no_first_and_last = orig[1:9]; + + Note that the subscript operator cannot be used to modify a string (i.e., + it cannot be on the left side of an assignment operator). .. bro:type:: pattern @@ -174,7 +202,7 @@ The Bro scripting language supports the following built-in types. and embedded. In exact matching the ``==`` equality relational operator is used - with one :bro:type:`pattern` operand and one :bro:type:`string` + with one "pattern" operand and one ":bro:type:`string`" operand (order of operands does not matter) to check whether the full string exactly matches the pattern. In exact matching, the ``^`` beginning-of-line and ``$`` end-of-line anchors are redundant since @@ -190,8 +218,8 @@ The Bro scripting language supports the following built-in types. yields false. The ``!=`` operator would yield the negation of ``==``. 
In embedded matching the ``in`` operator is used with one - :bro:type:`pattern` operand (which must be on the left-hand side) and - one :bro:type:`string` operand, but tests whether the pattern + "pattern" operand (which must be on the left-hand side) and + one ":bro:type:`string`" operand, but tests whether the pattern appears anywhere within the given string. For example:: /foo|bar/ in "foobar" @@ -203,27 +231,12 @@ The Bro scripting language supports the following built-in types. is false since "oob" does not appear at the start of "foobar". The ``!in`` operator would yield the negation of ``in``. -.. bro:type:: enum - - A type allowing the specification of a set of related values that - have no further structure. An example declaration: - - .. code:: bro - - type color: enum { Red, White, Blue, }; - - The last comma after ``Blue`` is optional. - - The only operations allowed on enumerations are equality comparisons - (``==``, ``!=``) and assignment (``=``). - Enumerations do not have associated values or ordering. - .. bro:type:: port - A type representing transport-level port numbers. Besides TCP and + A type representing transport-level port numbers (besides TCP and UDP ports, there is a concept of an ICMP "port" where the source port is the ICMP message type and the destination port the ICMP - message code. A ``port`` constant is written as an unsigned integer + message code). A ``port`` constant is written as an unsigned integer followed by one of ``/tcp``, ``/udp``, ``/icmp``, or ``/unknown``. Ports support the comparison operators (``==``, ``!=``, ``<``, ``<=``, @@ -255,14 +268,6 @@ The Bro scripting language supports the following built-in types. address) are treated internally as IPv4 addresses (for example, ``[::ffff:192.168.1.100]`` is equal to ``192.168.1.100``). - Hostname constants can also be used, but since a hostname can - correspond to multiple IP addresses, the type of such a variable is a - :bro:type:`set` of :bro:type:`addr` elements. 
For example: - - .. code:: bro - - local a = www.google.com; - Addresses can be compared for equality (``==``, ``!=``), and also for ordering (``<``, ``<=``, ``>``, ``>=``). The absolute value of an address gives the size in bits (32 for IPv4, and 128 for IPv6). @@ -285,9 +290,17 @@ The Bro scripting language supports the following built-in types. if ( a in s ) print "true"; - Note that you can check if a given ``addr`` is IPv4 or IPv6 using + You can check if a given ``addr`` is IPv4 or IPv6 using the :bro:id:`is_v4_addr` and :bro:id:`is_v6_addr` built-in functions. + Note that hostname constants can also be used, but since a hostname can + correspond to multiple IP addresses, the type of such a variable is + "set[addr]". For example: + + .. code:: bro + + local a = www.google.com; + .. bro:type:: subnet A type representing a block of IP addresses in CIDR notation. A @@ -296,13 +309,24 @@ The Bro scripting language supports the following built-in types. number. For example, ``192.168.0.0/16`` or ``[fe80::]/64``. Subnets can be compared for equality (``==``, ``!=``). An - :bro:type:`addr` can be checked for inclusion in a subnet using - the "in" or "!in" operators. + "addr" can be checked for inclusion in a subnet using + the ``in`` or ``!in`` operators. -.. bro:type:: any +.. bro:type:: enum - Used to bypass strong typing. For example, a function can take an - argument of type ``any`` when it may be of different types. + A type allowing the specification of a set of related values that + have no further structure. An example declaration: + + .. code:: bro + + type color: enum { Red, White, Blue, }; + + The last comma after ``Blue`` is optional. Both the type name ``color`` + and the individual values (``Red``, etc.) have global scope. + + Enumerations do not have associated values or ordering. + The only operations allowed on enumerations are equality comparisons + (``==``, ``!=``) and assignment (``=``). .. 
bro:type:: table @@ -316,24 +340,25 @@ The Bro scripting language supports the following built-in types. table [ type^+ ] of type - where *type^+* is one or more types, separated by commas. For example: + where *type^+* is one or more types, separated by commas. + For example: .. code:: bro global a: table[count] of string; - declares a table indexed by :bro:type:`count` values and yielding - :bro:type:`string` values. The yield type can also be more complex: + declares a table indexed by "count" values and yielding + "string" values. The yield type can also be more complex: .. code:: bro global a: table[count] of table[addr, port] of string; - which declares a table indexed by :bro:type:`count` and yielding - another :bro:type:`table` which is indexed by an :bro:type:`addr` - and :bro:type:`port` to yield a :bro:type:`string`. + which declares a table indexed by "count" and yielding + another "table" which is indexed by an "addr" + and "port" to yield a "string". - Initialization of tables occurs by enclosing a set of initializers within + One way to initialize a table is by enclosing a set of initializers within braces, for example: .. code:: bro @@ -343,18 +368,17 @@ The Bro scripting language supports the following built-in types. [5] = "five", }; - A table constructor (equivalent to above example) can also be used - to create a table: + A table constructor can also be used to create a table: .. code:: bro - global t2: table[count] of string = table( - [11] = "eleven", - [5] = "five" + global t2 = table( + [192.168.0.2, 22/tcp] = "ssh", + [192.168.0.3, 80/tcp] = "http" ); Table constructors can also be explicitly named by a type, which is - useful for when a more complex index type could otherwise be + useful when a more complex index type could otherwise be ambiguous: .. code:: bro @@ -381,17 +405,7 @@ The Bro scripting language supports the following built-in types. if ( 13 in t ) ... - - Iterate over tables with a ``for`` loop: - - .. 
code:: bro - - local t: table[count] of string; - for ( n in t ) - ... - - local services: table[addr, port] of string; - for ( [a, p] in services ) + if ( [192.168.0.2, 22/tcp] in t2 ) ... Add or overwrite individual table elements by assignment: @@ -400,7 +414,7 @@ The Bro scripting language supports the following built-in types. t[13] = "thirteen"; - Remove individual table elements with ``delete``: + Remove individual table elements with :bro:keyword:`delete`: .. code:: bro @@ -416,6 +430,9 @@ The Bro scripting language supports the following built-in types. |t| + See the :bro:keyword:`for` statement for info on how to iterate over + the elements in a table. + .. bro:type:: set A set is like a :bro:type:`table`, but it is a collection of indices @@ -426,25 +443,22 @@ The Bro scripting language supports the following built-in types. where *type^+* is one or more types separated by commas. - Sets are initialized by listing elements enclosed by curly braces: + Sets can be initialized by listing elements enclosed by curly braces: .. code:: bro global s: set[port] = { 21/tcp, 23/tcp, 80/tcp, 443/tcp }; global s2: set[port, string] = { [21/tcp, "ftp"], [23/tcp, "telnet"] }; - The types are explicitly shown in the example above, but they could - have been left to type inference. - A set constructor (equivalent to above example) can also be used to create a set: .. code:: bro - global s3: set[port] = set(21/tcp, 23/tcp, 80/tcp, 443/tcp); + global s3 = set(21/tcp, 23/tcp, 80/tcp, 443/tcp); Set constructors can also be explicitly named by a type, which is - useful for when a more complex index type could otherwise be + useful when a more complex index type could otherwise be ambiguous: .. code:: bro @@ -465,18 +479,10 @@ The Bro scripting language supports the following built-in types. if ( 21/tcp in s ) ... - if ( 21/tcp !in s ) + if ( [21/tcp, "ftp"] !in s2 ) ... - Iterate over a set with a ``for`` loop: - - .. code:: bro - - local s: set[port]; - for ( p in s ) - ... 
- - Elements are added with ``add``: + Elements are added with :bro:keyword:`add`: .. code:: bro @@ -485,7 +491,7 @@ The Bro scripting language supports the following built-in types. Nothing happens if the element with value ``22/tcp`` was already present in the set. - And removed with ``delete``: + And removed with :bro:keyword:`delete`: .. code:: bro @@ -501,6 +507,9 @@ The Bro scripting language supports the following built-in types. |s| + See the :bro:keyword:`for` statement for info on how to iterate over + the elements in a set. + .. bro:type:: vector A vector is like a :bro:type:`table`, except it's always indexed by a @@ -515,7 +524,7 @@ The Bro scripting language supports the following built-in types. .. code:: bro - global v: vector of string = vector("one", "two", "three"); + local v = vector("one", "two", "three"); Vector constructors can also be explicitly named by a type, which is useful for when a more complex yield type could otherwise be @@ -539,14 +548,6 @@ The Bro scripting language supports the following built-in types. print v[2]; - Iterate over a vector with a ``for`` loop: - - .. code:: bro - - local v: vector of string; - for ( n in v ) - ... - An element can be added to a vector by assigning the value (a value that already exists at that index will be overwritten): @@ -577,11 +578,17 @@ The Bro scripting language supports the following built-in types. The resulting vector of bool is the logical "and" (or logical "or") of each element of the operand vectors. + See the :bro:keyword:`for` statement for info on how to iterate over + the elements in a vector. + .. bro:type:: record - A ``record`` is a collection of values. Each value has a field name + A "record" is a collection of values. Each value has a field name and a type. Values do not need to have the same type and the types - have no restrictions. An example record type definition: + have no restrictions. 
Field names must follow the same syntax as + regular variable names (except that field names are allowed to be the + same as local or global variables). An example record type + definition: .. code:: bro @@ -590,85 +597,44 @@ The Bro scripting language supports the following built-in types. s: string &optional; }; - Access to a record field uses the dollar sign (``$``) operator: - - .. code:: bro - - global r: MyRecordType; - r$c = 13; - - Record assignment can be done field by field or as a whole like: - - .. code:: bro - - r = [$c = 13, $s = "thirteen"]; - + Records can be initialized or assigned as a whole in three different ways. When assigning a whole record value, all fields that are not :bro:attr:`&optional` or have a :bro:attr:`&default` attribute must - be specified. - - To test for existence of a field that is :bro:attr:`&optional`, use the - ``?$`` operator: + be specified. First, there's a constructor syntax: .. code:: bro - if ( r?$s ) - ... - - Records can also be created using a constructor syntax: - - .. code:: bro - - global r2: MyRecordType = record($c = 7); + local r: MyRecordType = record($c = 7); And the constructor can be explicitly named by type, too, which - is arguably more readable code: + is arguably more readable: .. code:: bro - global r3 = MyRecordType($c = 42); + local r = MyRecordType($c = 42); -.. bro:type:: opaque - - A data type whose actual representation/implementation is - intentionally hidden, but whose values may be passed to certain - functions that can actually access the internal/hidden resources. - Opaque types are differentiated from each other by qualifying them - like ``opaque of md5`` or ``opaque of sha1``. Any valid identifier - can be used as the type qualifier. - - An example use of this type is the set of built-in functions which - perform hashing: + And the third way is like this: .. 
code:: bro - local handle: opaque of md5 = md5_hash_init(); - md5_hash_update(handle, "test"); - md5_hash_update(handle, "testing"); - print md5_hash_finish(handle); + local r: MyRecordType = [$c = 13, $s = "thirteen"]; - Here the opaque type is used to provide a handle to a particular - resource which is calculating an MD5 checksum incrementally over - time, but the details of that resource aren't relevant, it's only - necessary to have a handle as a way of identifying it and - distinguishing it from other such resources. - -.. bro:type:: file - - Bro supports writing to files, but not reading from them. Files - can be opened using either the :bro:id:`open` or :bro:id:`open_for_append` - built-in functions, and closed using the :bro:id:`close` built-in - function. For example, declare, open, and write to a file - and finally close it like: + Access to a record field uses the dollar sign (``$``) operator, and + record fields can be assigned with this: .. code:: bro - global f: file = open("myfile"); - print f, "hello, world"; - close(f); + local r: MyRecordType; + r$c = 13; - Writing to files like this for logging usually isn't recommended, for better - logging support see :doc:`/frameworks/logging`. + To test if a field that is :bro:attr:`&optional` has been assigned a + value, use the ``?$`` operator (it returns a :bro:type:`bool` value of + ``T`` if the field has been assigned a value, or ``F`` if not): + + .. code:: bro + + if ( r ?$ s ) + ... .. bro:type:: function @@ -700,6 +666,16 @@ The Bro scripting language supports the following built-in types. type, but when it is, the return type and argument list (including the name of each argument) must match exactly. + Here is an example function that takes no parameters and does not + return a value: + + .. code:: bro + + function my_func() + { + print "my_func"; + } + Function types don't need to have a name and can be assigned anonymously: .. 
code:: bro @@ -742,9 +718,20 @@ The Bro scripting language supports the following built-in types. Event handlers are nearly identical in both syntax and semantics to a :bro:type:`function`, with the two differences being that event handlers have no return type since they never return a value, and - you cannot call an event handler. Instead of directly calling an - event handler from a script, event handler bodies are executed when - they are invoked by one of three different methods: + you cannot call an event handler. + + Example: + + .. code:: bro + + event my_event(r: bool, s: string) + { + print "my_event", r, s; + } + + Instead of directly calling an event handler from a script, event + handler bodies are executed when they are invoked by one of three + different methods: - From the event engine @@ -765,7 +752,7 @@ The Bro scripting language supports the following built-in types. This assumes that ``password_exposed`` was previously declared as an event handler type with compatible arguments. - - Via the ``schedule`` expression in a script + - Via the :bro:keyword:`schedule` expression in a script This delays the invocation of event handlers until some time in the future. For example: @@ -789,8 +776,8 @@ The Bro scripting language supports the following built-in types. immediate and they do not get scheduled through an event queue. Also, a unique feature of a hook is that a given hook handler body can short-circuit the execution of remaining hook handlers simply by - exiting from the body as a result of a ``break`` statement (as - opposed to a ``return`` or just reaching the end of the body). + exiting from the body as a result of a :bro:keyword:`break` statement (as + opposed to a :bro:keyword:`return` or just reaching the end of the body). A hook type is declared like:: @@ -859,142 +846,60 @@ The Bro scripting language supports the following built-in types. executed due to one handler body exiting as a result of a ``break`` statement. 
-Attributes ----------- +.. bro:type:: file -Attributes occur at the end of type/event declarations and change their -behavior. The syntax is ``&key`` or ``&key=val``, e.g., ``type T: -set[count] &read_expire=5min`` or ``event foo() &priority=-3``. The Bro -scripting language supports the following built-in attributes. + Bro supports writing to files, but not reading from them (to read from + files see the :doc:`/frameworks/input`). Files + can be opened using either the :bro:id:`open` or :bro:id:`open_for_append` + built-in functions, and closed using the :bro:id:`close` built-in + function. For example, declare, open, and write to a file and finally + close it like: -.. bro:attr:: &optional + .. code:: bro - Allows a record field to be missing. For example the type ``record { - a: addr; b: port &optional; }`` could be instantiated both as - singleton ``[$a=127.0.0.1]`` or pair ``[$a=127.0.0.1, $b=80/tcp]``. + local f = open("myfile"); + print f, "hello, world"; + close(f); -.. bro:attr:: &default + Writing to files like this for logging usually isn't recommended, for better + logging support see :doc:`/frameworks/logging`. - Uses a default value for a record field, a function/hook/event - parameter, or container elements. For example, ``table[int] of - string &default="foo"`` would create a table that returns the - :bro:type:`string` ``"foo"`` for any non-existing index. +.. bro:type:: opaque -.. bro:attr:: &redef + A data type whose actual representation/implementation is + intentionally hidden, but whose values may be passed to certain + built-in functions that can actually access the internal/hidden resources. + Opaque types are differentiated from each other by qualifying them + like "opaque of md5" or "opaque of sha1". - Allows for redefinition of initial object values. This is typically - used with constants, for example, ``const clever = T &redef;`` would - allow the constant to be redefined at some later point during script - execution. 
+ An example use of this type is the set of built-in functions which + perform hashing: -.. bro:attr:: &rotate_interval + .. code:: bro - Rotates a file after a specified interval. + local handle = md5_hash_init(); + md5_hash_update(handle, "test"); + md5_hash_update(handle, "testing"); + print md5_hash_finish(handle); -.. bro:attr:: &rotate_size + Here the opaque type is used to provide a handle to a particular + resource which is calculating an MD5 hash incrementally over + time, but the details of that resource aren't relevant, it's only + necessary to have a handle as a way of identifying it and + distinguishing it from other such resources. - Rotates a file after it has reached a given size in bytes. +.. bro:type:: any -.. bro:attr:: &add_func + Used to bypass strong typing. For example, a function can take an + argument of type ``any`` when it may be of different types. + The only operation allowed on a variable of type ``any`` is assignment. - Can be applied to an identifier with &redef to specify a function to - be called any time a "redef += ..." declaration is parsed. The - function takes two arguments of the same type as the identifier, the first - being the old value of the variable and the second being the new - value given after the "+=" operator in the "redef" declaration. The - return value of the function will be the actual new value of the - variable after the "redef" declaration is parsed. + Note that users aren't expected to use this type. It's provided mainly + for use by some built-in functions and scripts included with Bro. -.. bro:attr:: &delete_func +.. bro:type:: void - Same as &add_func, except for "redef" declarations that use the "-=" - operator. + An internal Bro type (i.e., "void" is not a reserved keyword in the Bro + scripting language) representing the absence of a return type for a + function. -.. bro:attr:: &expire_func - - Called right before a container element expires. 
The function's - first parameter is of the same type of the container and the second - parameter the same type of the container's index. The return - value is an :bro:type:`interval` indicating the amount of additional - time to wait before expiring the container element at the given - index (which will trigger another execution of this function). - -.. bro:attr:: &read_expire - - Specifies a read expiration timeout for container elements. That is, - the element expires after the given amount of time since the last - time it has been read. Note that a write also counts as a read. - -.. bro:attr:: &write_expire - - Specifies a write expiration timeout for container elements. That - is, the element expires after the given amount of time since the - last time it has been written. - -.. bro:attr:: &create_expire - - Specifies a creation expiration timeout for container elements. That - is, the element expires after the given amount of time since it has - been inserted into the container, regardless of any reads or writes. - -.. bro:attr:: &persistent - - Makes a variable persistent, i.e., its value is written to disk (per - default at shutdown time). - -.. bro:attr:: &synchronized - - Synchronizes variable accesses across nodes. The value of a - ``&synchronized`` variable is automatically propagated to all peers - when it changes. - -.. bro:attr:: &encrypt - - Encrypts files right before writing them to disk. - -.. TODO: needs to be documented in more detail. - -.. bro:attr:: &raw_output - - Opens a file in raw mode, i.e., non-ASCII characters are not - escaped. - -.. bro:attr:: &mergeable - - Prefers set union to assignment for synchronized state. This - attribute is used in conjunction with :bro:attr:`&synchronized` - container types: when the same container is updated at two peers - with different value, the propagation of the state causes a race - condition, where the last update succeeds. 
This can cause - inconsistencies and can be avoided by unifying the two sets, rather - than merely overwriting the old value. - -.. bro:attr:: &priority - - Specifies the execution priority (as a signed integer) of a hook or - event handler. Higher values are executed before lower ones. The - default value is 0. - -.. bro:attr:: &group - - Groups event handlers such that those in the same group can be - jointly activated or deactivated. - -.. bro:attr:: &log - - Writes a record field to the associated log stream. - -.. bro:attr:: &error_handler - - Internally set on the events that are associated with the reporter - framework: :bro:id:`reporter_info`, :bro:id:`reporter_warning`, and - :bro:id:`reporter_error`. It prevents any handlers of those events - from being able to generate reporter messages that go through any of - those events (i.e., it prevents an infinite event recursion). Instead, - such nested reporter messages are output to stderr. - -.. bro:attr:: &type_column - - Used by the input framework. It can be used on columns of type - :bro:type:`port` and specifies the name of an additional column in - the input file which specifies the protocol of the port (tcp/udp/icmp). 
diff --git a/doc/scripting/data_struct_vector_declaration.bro b/doc/scripting/data_struct_vector_declaration.bro index d64754b97b..6d684d09b1 100644 --- a/doc/scripting/data_struct_vector_declaration.bro +++ b/doc/scripting/data_struct_vector_declaration.bro @@ -10,6 +10,6 @@ event bro_init() print fmt("contents of v1: %s", v1); print fmt("length of v1: %d", |v1|); - print fmt("contents of v1: %s", v2); + print fmt("contents of v2: %s", v2); print fmt("length of v2: %d", |v2|); } diff --git a/doc/scripting/data_type_pattern_01.bro b/doc/scripting/data_type_pattern_01.bro index 08378dd124..e57650a589 100644 --- a/doc/scripting/data_type_pattern_01.bro +++ b/doc/scripting/data_type_pattern_01.bro @@ -1,6 +1,6 @@ event bro_init() { - local test_string = "The quick brown fox jumped over the lazy dog."; + local test_string = "The quick brown fox jumps over the lazy dog."; local test_pattern = /quick|lazy/; if ( test_pattern in test_string ) diff --git a/doc/scripting/index.rst b/doc/scripting/index.rst index efb9aced15..559e131b8f 100644 --- a/doc/scripting/index.rst +++ b/doc/scripting/index.rst @@ -730,7 +730,7 @@ Bro supports ``usec``, ``msec``, ``sec``, ``min``, ``hr``, or ``day`` which repr microseconds, milliseconds, seconds, minutes, hours, and days respectively. In fact, the interval data type allows for a surprising amount of variation in its definitions. There can be a space between -the numeric constant or they can crammed together like a temporal +the numeric constant or they can be crammed together like a temporal portmanteau. The time unit can be either singular or plural. All of this adds up to to the fact that both ``42hrs`` and ``42 hr`` are perfectly valid and logically equivalent in Bro. The point, however, @@ -819,7 +819,7 @@ with the ``typedef`` and ``struct`` keywords, Bro allows you to cobble together new data types to suit the needs of your situation. When combined with the ``type`` keyword, ``record`` can generate a -composite type. 
We have, in fact, already encountered a a complex +composite type. We have, in fact, already encountered a complex example of the ``record`` data type in the earlier sections, the :bro:type:`connection` record passed to many events. Another one, :bro:type:`Conn::Info`, which corresponds to the fields logged into @@ -1014,8 +1014,8 @@ remaining logs to factor.log. :lines: 38-62 :linenos: -To dynamically alter the file in which a stream writes its logs a -filter can specify function returns a string to be used as the +To dynamically alter the file in which a stream writes its logs, a +filter can specify a function that returns a string to be used as the filename for the current call to ``Log::write``. The definition for this function has to take as its parameters a ``Log::ID`` called id, a string called ``path`` and the appropriate record type for the logs called diff --git a/scripts/base/frameworks/files/magic/general.sig b/scripts/base/frameworks/files/magic/general.sig index 20276f69ac..a11e4a05e4 100644 --- a/scripts/base/frameworks/files/magic/general.sig +++ b/scripts/base/frameworks/files/magic/general.sig @@ -9,3 +9,8 @@ signature file-tar { file-magic /([[:print:]\x00]){100}(([[:digit:]\x00\x20]){8}){3}/ file-mime "application/x-tar", 150 } + +signature file-swf { + file-magic /(F|C|Z)WS/ + file-mime "application/x-shockwave-flash", 60 +} \ No newline at end of file diff --git a/scripts/base/frameworks/files/magic/libmagic.sig b/scripts/base/frameworks/files/magic/libmagic.sig index 55486d411e..a4604959c3 100644 --- a/scripts/base/frameworks/files/magic/libmagic.sig +++ b/scripts/base/frameworks/files/magic/libmagic.sig @@ -2769,19 +2769,6 @@ signature file-magic-auto408 { file-magic /(.{512})(\xec\xa5\xc1)/ } -# >0 string,=FWS (len=3), ["Macromedia Flash data,"], swap_endian=0 -# >>3 byte&,x, ["version %d"], swap_endian=0 -signature file-magic-auto409 { - file-mime "application/x-shockwave-flash", 1 - file-magic /(FWS)(.{1})/ -} - -# >0 string,=CWS (len=3), 
["Macromedia Flash data (compressed),"], swap_endian=0 -signature file-magic-auto410 { - file-mime "application/x-shockwave-flash", 60 - file-magic /(CWS)/ -} - # >0 regex/20,=^\.[A-Za-z0-9][A-Za-z0-9][ \t] (len=29), ["troff or preprocessor input text"], swap_endian=0 signature file-magic-auto411 { file-mime "text/troff", 59 diff --git a/scripts/base/frameworks/files/main.bro b/scripts/base/frameworks/files/main.bro index 5f0e5a2e00..d680c467b6 100644 --- a/scripts/base/frameworks/files/main.bro +++ b/scripts/base/frameworks/files/main.bro @@ -264,10 +264,10 @@ export { ## Returns: The set of MIME types. global registered_mime_types: function(tag: Analyzer::Tag) : set[string]; - ## Returns a table of all MIME-type-to-analyzer mappings currently registered. + ## Returns a table of all MIME-type-to-analyzer mappings currently registered. ## - ## Returns: A table mapping each analyzer to the set of MIME types registered for - ## it. + ## Returns: A table mapping each analyzer to the set of MIME types + ## registered for it. global all_registered_mime_types: function() : table[Analyzer::Tag] of set[string]; ## Event that can be handled to access the Info record as it is sent on diff --git a/scripts/base/frameworks/input/main.bro b/scripts/base/frameworks/input/main.bro index 7ca6e1c72b..fa766ba27b 100644 --- a/scripts/base/frameworks/input/main.bro +++ b/scripts/base/frameworks/input/main.bro @@ -4,6 +4,17 @@ module Input; export { + type Event: enum { + EVENT_NEW = 0, + EVENT_CHANGED = 1, + EVENT_REMOVED = 2, + }; + + type Mode: enum { + MANUAL = 0, + REREAD = 1, + STREAM = 2 + }; ## The default input reader used. Defaults to `READER_ASCII`. const default_reader = READER_ASCII &redef; diff --git a/scripts/base/frameworks/intel/main.bro b/scripts/base/frameworks/intel/main.bro index fb3a9a3613..5b31dd964e 100644 --- a/scripts/base/frameworks/intel/main.bro +++ b/scripts/base/frameworks/intel/main.bro @@ -81,6 +81,9 @@ export { ## Where the data was discovered. 
where: Where &log; + ## The name of the node where the match was discovered. + node: string &optional &log; + ## If the data was discovered within a connection, the ## connection record should go here to give context to the data. conn: connection &optional; @@ -240,6 +243,11 @@ function Intel::seen(s: Seen) s$indicator_type = Intel::ADDR; } + if ( ! s?$node ) + { + s$node = peer_description; + } + if ( have_full_data ) { local items = get_items(s); diff --git a/scripts/base/frameworks/logging/__load__.bro b/scripts/base/frameworks/logging/__load__.bro index 44293de5cb..74c7362846 100644 --- a/scripts/base/frameworks/logging/__load__.bro +++ b/scripts/base/frameworks/logging/__load__.bro @@ -1,7 +1,5 @@ @load ./main @load ./postprocessors @load ./writers/ascii -@load ./writers/dataseries @load ./writers/sqlite -@load ./writers/elasticsearch @load ./writers/none diff --git a/scripts/base/frameworks/logging/main.bro b/scripts/base/frameworks/logging/main.bro index c068866f63..bf1affcb01 100644 --- a/scripts/base/frameworks/logging/main.bro +++ b/scripts/base/frameworks/logging/main.bro @@ -5,9 +5,15 @@ module Log; -# Log::ID and Log::Writer are defined in types.bif due to circular dependencies. - export { + ## Type that defines an ID unique to each log stream. Scripts creating new log + ## streams need to redef this enum to add their own specific log ID. The log ID + ## implicitly determines the default name of the generated log file. + type Log::ID: enum { + ## Dummy place-holder. + UNKNOWN + }; + ## If true, local logging is by default enabled for all filters. const enable_local_logging = T &redef; @@ -27,13 +33,13 @@ export { const set_separator = "," &redef; ## String to use for empty fields. This should be different from - ## *unset_field* to make the output unambiguous. + ## *unset_field* to make the output unambiguous. ## Can be overwritten by individual writers. const empty_field = "(empty)" &redef; ## String to use for an unset &optional field. 
## Can be overwritten by individual writers. - const unset_field = "-" &redef; + const unset_field = "-" &redef; ## Type defining the content of a logging stream. type Stream: record { diff --git a/scripts/base/frameworks/logging/writers/dataseries.bro b/scripts/base/frameworks/logging/writers/dataseries.bro deleted file mode 100644 index b24601d6b9..0000000000 --- a/scripts/base/frameworks/logging/writers/dataseries.bro +++ /dev/null @@ -1,60 +0,0 @@ -##! Interface for the DataSeries log writer. - -module LogDataSeries; - -export { - ## Compression to use with the DS output file. Options are: - ## - ## 'none' -- No compression. - ## 'lzf' -- LZF compression (very quick, but leads to larger output files). - ## 'lzo' -- LZO compression (very fast decompression times). - ## 'zlib' -- GZIP compression (slower than LZF, but also produces smaller output). - ## 'bz2' -- BZIP2 compression (slower than GZIP, but also produces smaller output). - const compression = "zlib" &redef; - - ## The extent buffer size. - ## Larger values here lead to better compression and more efficient writes, - ## but also increase the lag between the time events are received and - ## the time they are actually written to disk. - const extent_size = 65536 &redef; - - ## Should we dump the XML schema we use for this DS file to disk? - ## If yes, the XML schema shares the name of the logfile, but has - ## an XML ending. - const dump_schema = F &redef; - - ## How many threads should DataSeries spawn to perform compression? - ## Note that this dictates the number of threads per log stream. If - ## you're using a lot of streams, you may want to keep this number - ## relatively small. - ## - ## Default value is 1, which will spawn one thread / stream. - ## - ## Maximum is 128, minimum is 1. - const num_threads = 1 &redef; - - ## Should time be stored as an integer or a double? 
- ## Storing time as a double leads to possible precision issues and - ## can (significantly) increase the size of the resulting DS log. - ## That said, timestamps stored in double form are consistent - ## with the rest of Bro, including the standard ASCII log. Hence, we - ## use them by default. - const use_integer_for_time = F &redef; -} - -# Default function to postprocess a rotated DataSeries log file. It moves the -# rotated file to a new name that includes a timestamp with the opening time, -# and then runs the writer's default postprocessor command on it. -function default_rotation_postprocessor_func(info: Log::RotationInfo) : bool - { - # Move file to name including both opening and closing time. - local dst = fmt("%s.%s.ds", info$path, - strftime(Log::default_rotation_date_format, info$open)); - - system(fmt("/bin/mv %s %s", info$fname, dst)); - - # Run default postprocessor. - return Log::run_rotation_postprocessor_cmd(info, dst); - } - -redef Log::default_rotation_postprocessors += { [Log::WRITER_DATASERIES] = default_rotation_postprocessor_func }; diff --git a/scripts/base/frameworks/logging/writers/elasticsearch.bro b/scripts/base/frameworks/logging/writers/elasticsearch.bro deleted file mode 100644 index 6292876bd0..0000000000 --- a/scripts/base/frameworks/logging/writers/elasticsearch.bro +++ /dev/null @@ -1,48 +0,0 @@ -##! Log writer for sending logs to an ElasticSearch server. -##! -##! Note: This module is in testing and is not yet considered stable! -##! -##! There is one known memory issue. If your elasticsearch server is -##! running slowly and taking too long to return from bulk insert -##! requests, the message queue to the writer thread will continue -##! growing larger and larger giving the appearance of a memory leak. - -module LogElasticSearch; - -export { - ## Name of the ES cluster. - const cluster_name = "elasticsearch" &redef; - - ## ES server. - const server_host = "127.0.0.1" &redef; - - ## ES port. 
- const server_port = 9200 &redef; - - ## Name of the ES index. - const index_prefix = "bro" &redef; - - ## The ES type prefix comes before the name of the related log. - ## e.g. prefix = "bro\_" would create types of bro_dns, bro_software, etc. - const type_prefix = "" &redef; - - ## The time before an ElasticSearch transfer will timeout. Note that - ## the fractional part of the timeout will be ignored. In particular, - ## time specifications less than a second result in a timeout value of - ## 0, which means "no timeout." - const transfer_timeout = 2secs; - - ## The batch size is the number of messages that will be queued up before - ## they are sent to be bulk indexed. - const max_batch_size = 1000 &redef; - - ## The maximum amount of wall-clock time that is allowed to pass without - ## finishing a bulk log send. This represents the maximum delay you - ## would like to have with your logs before they are sent to ElasticSearch. - const max_batch_interval = 1min &redef; - - ## The maximum byte size for a buffered JSON string to send to the bulk - ## insert API. - const max_byte_size = 1024 * 1024 &redef; -} - diff --git a/scripts/base/init-bare.bro b/scripts/base/init-bare.bro index b08b266f34..efce524fc5 100644 --- a/scripts/base/init-bare.bro +++ b/scripts/base/init-bare.bro @@ -2485,8 +2485,7 @@ type http_message_stat: record { header_length: count; }; -## Maximum number of HTTP entity data delivered to events. The amount of data -## can be limited for better performance, zero disables truncation. +## Maximum number of HTTP entity data delivered to events. ## ## .. bro:see:: http_entity_data skip_http_entity_data skip_http_data global http_entity_data_delivery_size = 1500 &redef; @@ -3364,9 +3363,6 @@ const global_hash_seed: string = "" &redef; ## The maximum is currently 128 bits. const bits_per_uid: count = 96 &redef; -# Load BiFs defined by plugins. 
-@load base/bif/plugins - # Load these frameworks here because they use fairly deep integration with # BiFs and script-land defined types. @load base/frameworks/logging @@ -3375,3 +3371,7 @@ const bits_per_uid: count = 96 &redef; @load base/frameworks/files @load base/bif + +# Load BiFs defined by plugins. +@load base/bif/plugins + diff --git a/scripts/base/protocols/ssl/consts.bro b/scripts/base/protocols/ssl/consts.bro index a19aaecbe5..54952988f0 100644 --- a/scripts/base/protocols/ssl/consts.bro +++ b/scripts/base/protocols/ssl/consts.bro @@ -30,6 +30,7 @@ export { const HELLO_REQUEST = 0; const CLIENT_HELLO = 1; const SERVER_HELLO = 2; + const HELLO_VERIFY_REQUEST = 3; # RFC 6347 const SESSION_TICKET = 4; # RFC 5077 const CERTIFICATE = 11; const SERVER_KEY_EXCHANGE = 12; @@ -40,6 +41,7 @@ export { const FINISHED = 20; const CERTIFICATE_URL = 21; # RFC 3546 const CERTIFICATE_STATUS = 22; # RFC 3546 + const SUPPLEMENTAL_DATA = 23; # RFC 4680 ## Mapping between numeric codes and human readable strings for alert ## levels. @@ -112,7 +114,8 @@ export { [19] = "client_certificate_type", [20] = "server_certificate_type", [21] = "padding", # temporary till 2015-03-12 - [22] = "encrypt_then_mac", # temporary till 2015-06-05 + [22] = "encrypt_then_mac", + [23] = "extended_master_secret", # temporary till 2015-09-26 [35] = "SessionTicket TLS", [40] = "extended_random", [13172] = "next_protocol_negotiation", diff --git a/scripts/policy/tuning/logs-to-elasticsearch.bro b/scripts/policy/tuning/logs-to-elasticsearch.bro deleted file mode 100644 index b770b8f84b..0000000000 --- a/scripts/policy/tuning/logs-to-elasticsearch.bro +++ /dev/null @@ -1,36 +0,0 @@ -##! Load this script to enable global log output to an ElasticSearch database. - -module LogElasticSearch; - -export { - ## An elasticsearch specific rotation interval. - const rotation_interval = 3hr &redef; - - ## Optionally ignore any :bro:type:`Log::ID` from being sent to - ## ElasticSearch with this script. 
- const excluded_log_ids: set[Log::ID] &redef; - - ## If you want to explicitly only send certain :bro:type:`Log::ID` - ## streams, add them to this set. If the set remains empty, all will - ## be sent. The :bro:id:`LogElasticSearch::excluded_log_ids` option - ## will remain in effect as well. - const send_logs: set[Log::ID] &redef; -} - -event bro_init() &priority=-5 - { - if ( server_host == "" ) - return; - - for ( stream_id in Log::active_streams ) - { - if ( stream_id in excluded_log_ids || - (|send_logs| > 0 && stream_id !in send_logs) ) - next; - - local filter: Log::Filter = [$name = "default-es", - $writer = Log::WRITER_ELASTICSEARCH, - $interv = LogElasticSearch::rotation_interval]; - Log::add_filter(stream_id, filter); - } - } diff --git a/scripts/test-all-policy.bro b/scripts/test-all-policy.bro index 5ab596dbfb..1146f274bb 100644 --- a/scripts/test-all-policy.bro +++ b/scripts/test-all-policy.bro @@ -98,7 +98,4 @@ @load tuning/defaults/packet-fragments.bro @load tuning/defaults/warnings.bro @load tuning/json-logs.bro -@load tuning/logs-to-elasticsearch.bro @load tuning/track-all-assets.bro - -redef LogElasticSearch::server_host = ""; diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index cd65592c74..9f94f8f1e3 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -118,8 +118,6 @@ include(BifCl) set(BIF_SRCS bro.bif - logging.bif - input.bif event.bif const.bif types.bif @@ -156,13 +154,17 @@ set(bro_SUBDIR_LIBS CACHE INTERNAL "subdir libraries" FORCE) set(bro_PLUGIN_LIBS CACHE INTERNAL "plugin libraries" FORCE) add_subdirectory(analyzer) -add_subdirectory(file_analysis) -add_subdirectory(probabilistic) add_subdirectory(broxygen) +add_subdirectory(file_analysis) +add_subdirectory(input) +add_subdirectory(iosource) +add_subdirectory(logging) +add_subdirectory(probabilistic) set(bro_SUBDIRS - ${bro_SUBDIR_LIBS} + # Order is important here. 
${bro_PLUGIN_LIBS} + ${bro_SUBDIR_LIBS} ) if ( NOT bro_HAVE_OBJECT_LIBRARIES ) @@ -253,7 +255,6 @@ set(bro_SRCS Anon.cc Attr.cc Base64.cc - BPF_Program.cc Brofiler.cc BroString.cc CCL.cc @@ -278,14 +279,13 @@ set(bro_SRCS EventRegistry.cc Expr.cc File.cc - FlowSrc.cc + Flare.cc Frag.cc Frame.cc Func.cc Hash.cc ID.cc IntSet.cc - IOSource.cc IP.cc IPAddr.cc List.cc @@ -298,7 +298,7 @@ set(bro_SRCS OSFinger.cc PacketFilter.cc PersistenceSerializer.cc - PktSrc.cc + Pipe.cc PolicyFile.cc PrefixTable.cc PriorityQueue.cc @@ -347,24 +347,6 @@ set(bro_SRCS threading/formatters/Ascii.cc threading/formatters/JSON.cc - logging/Manager.cc - logging/WriterBackend.cc - logging/WriterFrontend.cc - logging/writers/Ascii.cc - logging/writers/DataSeries.cc - logging/writers/SQLite.cc - logging/writers/ElasticSearch.cc - logging/writers/None.cc - - input/Manager.cc - input/ReaderBackend.cc - input/ReaderFrontend.cc - input/readers/Ascii.cc - input/readers/Raw.cc - input/readers/Benchmark.cc - input/readers/Binary.cc - input/readers/SQLite.cc - 3rdparty/sqlite3.c plugin/Component.cc diff --git a/src/ChunkedIO.cc b/src/ChunkedIO.cc index 54e2e59575..722b209bcd 100644 --- a/src/ChunkedIO.cc +++ b/src/ChunkedIO.cc @@ -210,6 +210,7 @@ bool ChunkedIOFd::WriteChunk(Chunk* chunk, bool partial) else pending_head = pending_tail = q; + write_flare.Fire(); return Flush(); } @@ -232,6 +233,7 @@ bool ChunkedIOFd::PutIntoWriteBuffer(Chunk* chunk) write_len += len; delete chunk; + write_flare.Fire(); if ( network_time - last_flush > 0.005 ) FlushWriteBuffer(); @@ -269,6 +271,10 @@ bool ChunkedIOFd::FlushWriteBuffer() if ( unsigned(written) == len ) { write_pos = write_len = 0; + + if ( ! pending_head ) + write_flare.Extinguish(); + return true; } @@ -318,7 +324,12 @@ bool ChunkedIOFd::Flush() } } - return FlushWriteBuffer(); + bool rval = FlushWriteBuffer(); + + if ( ! 
pending_head && write_len == 0 ) + write_flare.Extinguish(); + + return rval; } uint32 ChunkedIOFd::ChunkAvailable() @@ -394,6 +405,9 @@ bool ChunkedIOFd::Read(Chunk** chunk, bool may_block) #ifdef DEBUG_COMMUNICATION AddToBuffer("", true); #endif + if ( ! ChunkAvailable() ) + read_flare.Extinguish(); + return false; } @@ -402,9 +416,15 @@ bool ChunkedIOFd::Read(Chunk** chunk, bool may_block) #ifdef DEBUG_COMMUNICATION AddToBuffer("", true); #endif + read_flare.Extinguish(); return true; } + if ( ChunkAvailable() ) + read_flare.Fire(); + else + read_flare.Extinguish(); + #ifdef DEBUG if ( *chunk ) DBG_LOG(DBG_CHUNKEDIO, "read of size %d %s[%s]", @@ -481,6 +501,9 @@ bool ChunkedIOFd::ReadChunk(Chunk** chunk, bool may_block) read_pos = 0; read_len = bytes_left; + if ( ! ChunkAvailable() ) + read_flare.Extinguish(); + // If allowed, wait a bit for something to read. if ( may_block ) { @@ -607,6 +630,14 @@ bool ChunkedIOFd::IsFillingUp() return stats.pending > MAX_BUFFERED_CHUNKS_SOFT; } +iosource::FD_Set ChunkedIOFd::ExtraReadFDs() const + { + iosource::FD_Set rval; + rval.Insert(write_flare.FD()); + rval.Insert(read_flare.FD()); + return rval; + } + void ChunkedIOFd::Clear() { while ( pending_head ) @@ -618,6 +649,9 @@ void ChunkedIOFd::Clear() } pending_head = pending_tail = 0; + + if ( write_len == 0 ) + write_flare.Extinguish(); } const char* ChunkedIOFd::Error() @@ -830,6 +864,7 @@ bool ChunkedIOSSL::Write(Chunk* chunk) else write_head = write_tail = q; + write_flare.Fire(); Flush(); return true; } @@ -935,6 +970,7 @@ bool ChunkedIOSSL::Flush() write_state = LEN; } + write_flare.Extinguish(); return true; } @@ -1104,6 +1140,13 @@ bool ChunkedIOSSL::IsFillingUp() return false; } +iosource::FD_Set ChunkedIOSSL::ExtraReadFDs() const + { + iosource::FD_Set rval; + rval.Insert(write_flare.FD()); + return rval; + } + void ChunkedIOSSL::Clear() { while ( write_head ) @@ -1114,6 +1157,7 @@ void ChunkedIOSSL::Clear() write_head = next; } write_head = write_tail = 0; + 
write_flare.Extinguish(); } const char* ChunkedIOSSL::Error() diff --git a/src/ChunkedIO.h b/src/ChunkedIO.h index a9865e4c05..b590453a72 100644 --- a/src/ChunkedIO.h +++ b/src/ChunkedIO.h @@ -6,7 +6,8 @@ #include "config.h" #include "List.h" #include "util.h" - +#include "Flare.h" +#include "iosource/FD_Set.h" #include #ifdef NEED_KRB5_H @@ -95,6 +96,11 @@ public: // Returns underlying fd if available, -1 otherwise. virtual int Fd() { return -1; } + // Returns supplementary file descriptors that become read-ready in order + // to signal that there is some work that can be performed. + virtual iosource::FD_Set ExtraReadFDs() const + { return iosource::FD_Set(); } + // Makes sure that no additional protocol data is written into // the output stream. If this is activated, the output cannot // be read again by any of these classes! @@ -177,6 +183,7 @@ public: virtual void Clear(); virtual bool Eof() { return eof; } virtual int Fd() { return fd; } + virtual iosource::FD_Set ExtraReadFDs() const; virtual void Stats(char* buffer, int length); private: @@ -240,6 +247,8 @@ private: ChunkQueue* pending_tail; pid_t pid; + bro::Flare write_flare; + bro::Flare read_flare; }; // Chunked I/O using an SSL connection. @@ -262,6 +271,7 @@ public: virtual void Clear(); virtual bool Eof() { return eof; } virtual int Fd() { return socket; } + virtual iosource::FD_Set ExtraReadFDs() const; virtual void Stats(char* buffer, int length); private: @@ -303,6 +313,8 @@ private: // One SSL for all connections. 
static SSL_CTX* ctx; + + bro::Flare write_flare; }; #include @@ -328,6 +340,8 @@ public: virtual bool Eof() { return io->Eof(); } virtual int Fd() { return io->Fd(); } + virtual iosource::FD_Set ExtraReadFDs() const + { return io->ExtraReadFDs(); } virtual void Stats(char* buffer, int length); void EnableCompression(int level) diff --git a/src/DNS_Mgr.cc b/src/DNS_Mgr.cc index 9188d61b96..2c049ba803 100644 --- a/src/DNS_Mgr.cc +++ b/src/DNS_Mgr.cc @@ -35,6 +35,7 @@ #include "Net.h" #include "Var.h" #include "Reporter.h" +#include "iosource/Manager.h" extern "C" { extern int select(int, fd_set *, fd_set *, fd_set *, struct timeval *); @@ -404,17 +405,17 @@ DNS_Mgr::~DNS_Mgr() delete [] dir; } -bool DNS_Mgr::Init() +void DNS_Mgr::InitPostScript() { if ( did_init ) - return true; + return; const char* cache_dir = dir ? dir : "."; if ( mode == DNS_PRIME && ! ensure_dir(cache_dir) ) { did_init = 0; - return false; + return; } cache_name = new char[strlen(cache_dir) + 64]; @@ -433,14 +434,12 @@ bool DNS_Mgr::Init() did_init = 1; - io_sources.Register(this, true); + iosource_mgr->Register(this, true); // We never set idle to false, having the main loop only calling us from // time to time. If we're issuing more DNS requests than we can handle // in this way, we are having problems anyway ... 
- idle = true; - - return true; + SetIdle(true); } static TableVal* fake_name_lookup_result(const char* name) @@ -1217,9 +1216,10 @@ void DNS_Mgr::IssueAsyncRequests() } } -void DNS_Mgr::GetFds(int* read, int* write, int* except) +void DNS_Mgr::GetFds(iosource::FD_Set* read, iosource::FD_Set* write, + iosource::FD_Set* except) { - *read = nb_dns_fd(nb_dns); + read->Insert(nb_dns_fd(nb_dns)); } double DNS_Mgr::NextTimestamp(double* network_time) diff --git a/src/DNS_Mgr.h b/src/DNS_Mgr.h index 7864505add..d8f420e6cc 100644 --- a/src/DNS_Mgr.h +++ b/src/DNS_Mgr.h @@ -12,7 +12,7 @@ #include "BroList.h" #include "Dict.h" #include "EventHandler.h" -#include "IOSource.h" +#include "iosource/IOSource.h" #include "IPAddr.h" class Val; @@ -40,12 +40,12 @@ enum DNS_MgrMode { // Number of seconds we'll wait for a reply. #define DNS_TIMEOUT 5 -class DNS_Mgr : public IOSource { +class DNS_Mgr : public iosource::IOSource { public: DNS_Mgr(DNS_MgrMode mode); virtual ~DNS_Mgr(); - bool Init(); + void InitPostScript(); void Flush(); // Looks up the address or addresses of the given host, and returns @@ -132,7 +132,8 @@ protected: void DoProcess(bool flush); // IOSource interface. 
- virtual void GetFds(int* read, int* write, int* except); + virtual void GetFds(iosource::FD_Set* read, iosource::FD_Set* write, + iosource::FD_Set* except); virtual double NextTimestamp(double* network_time); virtual void Process(); virtual const char* Tag() { return "DNS_Mgr"; } diff --git a/src/DebugLogger.cc b/src/DebugLogger.cc index cab2b0494f..6f025e3c2b 100644 --- a/src/DebugLogger.cc +++ b/src/DebugLogger.cc @@ -16,9 +16,10 @@ DebugLogger::Stream DebugLogger::streams[NUM_DBGS] = { { "compressor", 0, false }, {"string", 0, false }, { "notifiers", 0, false }, { "main-loop", 0, false }, { "dpd", 0, false }, { "tm", 0, false }, - { "logging", 0, false }, {"input", 0, false }, + { "logging", 0, false }, {"input", 0, false }, { "threading", 0, false }, { "file_analysis", 0, false }, - { "plugins", 0, false }, { "broxygen", 0, false } + { "plugins", 0, false }, { "broxygen", 0, false }, + { "pktio", 0, false} }; DebugLogger::DebugLogger(const char* filename) diff --git a/src/DebugLogger.h b/src/DebugLogger.h index 845cf7a1a4..9cd09dada1 100644 --- a/src/DebugLogger.h +++ b/src/DebugLogger.h @@ -31,6 +31,7 @@ enum DebugStream { DBG_FILE_ANALYSIS, // File analysis DBG_PLUGINS, // Plugin system DBG_BROXYGEN, // Broxygen + DBG_PKTIO, // Packet sources and dumpers. NUM_DBGS // Has to be last }; diff --git a/src/Flare.cc b/src/Flare.cc new file mode 100644 index 0000000000..dcb5fa2c1f --- /dev/null +++ b/src/Flare.cc @@ -0,0 +1,74 @@ +// See the file "COPYING" in the main distribution directory for copyright. 
+ +#include "Flare.h" +#include "Reporter.h" +#include +#include +#include + +using namespace bro; + +Flare::Flare() + : pipe(FD_CLOEXEC, FD_CLOEXEC, O_NONBLOCK, O_NONBLOCK) + { + } + +static void bad_pipe_op(const char* which) + { + char buf[256]; + strerror_r(errno, buf, sizeof(buf)); + reporter->FatalErrorWithCore("unexpected pipe %s failure: %s", which, buf); + } + +void Flare::Fire() + { + char tmp; + + for ( ; ; ) + { + int n = write(pipe.WriteFD(), &tmp, 1); + + if ( n > 0 ) + // Success -- wrote a byte to pipe. + break; + + if ( n < 0 ) + { + if ( errno == EAGAIN ) + // Success: pipe is full and just need at least one byte in it. + break; + + if ( errno == EINTR ) + // Interrupted: try again. + continue; + + bad_pipe_op("write"); + } + + // No error, but didn't write a byte: try again. + } + } + +void Flare::Extinguish() + { + char tmp[256]; + + for ( ; ; ) + { + int n = read(pipe.ReadFD(), &tmp, sizeof(tmp)); + + if ( n >= 0 ) + // Pipe may not be empty yet: try again. + continue; + + if ( errno == EAGAIN ) + // Success: pipe is now empty. + break; + + if ( errno == EINTR ) + // Interrupted: try again. + continue; + + bad_pipe_op("read"); + } + } diff --git a/src/Flare.h b/src/Flare.h new file mode 100644 index 0000000000..fb6d7cae4c --- /dev/null +++ b/src/Flare.h @@ -0,0 +1,44 @@ +// See the file "COPYING" in the main distribution directory for copyright. + +#ifndef BRO_FLARE_H +#define BRO_FLARE_H + +#include "Pipe.h" + +namespace bro { + +class Flare { +public: + + /** + * Create a flare object that can be used to signal a "ready" status via + * a file descriptor that may be integrated with select(), poll(), etc. + * Not thread-safe, but that should only require Fire()/Extinguish() calls + * to be made mutually exclusive (across all copies of a Flare). + */ + Flare(); + + /** + * @return a file descriptor that will become ready if the flare has been + * Fire()'d and not yet Extinguished()'d. 
+ */ + int FD() const + { return pipe.ReadFD(); } + + /** + * Put the object in the "ready" state. + */ + void Fire(); + + /** + * Take the object out of the "ready" state. + */ + void Extinguish(); + +private: + Pipe pipe; +}; + +} // namespace bro + +#endif // BRO_FLARE_H diff --git a/src/FlowSrc.cc b/src/FlowSrc.cc deleted file mode 100644 index 8eed94fcea..0000000000 --- a/src/FlowSrc.cc +++ /dev/null @@ -1,228 +0,0 @@ -// See the file "COPYING" in the main distribution directory for copyright. -// -// Written by Bernhard Ager, TU Berlin (2006/2007). - -#include -#include -#include -#include - -#include "FlowSrc.h" -#include "Net.h" -#include "analyzer/protocol/netflow/netflow_pac.h" -#include - -FlowSrc::FlowSrc() - { // TODO: v9. - selectable_fd = -1; - idle = false; - data = 0; - pdu_len = -1; - exporter_ip = 0; - current_timestamp = next_timestamp = 0.0; - netflow_analyzer = new binpac::NetFlow::NetFlow_Analyzer(); - } - -FlowSrc::~FlowSrc() - { - delete netflow_analyzer; - } - -void FlowSrc::GetFds(int* read, int* write, int* except) - { - if ( selectable_fd >= 0 ) - *read = selectable_fd; - } - -double FlowSrc::NextTimestamp(double* network_time) - { - if ( ! data && ! ExtractNextPDU() ) - return -1.0; - else - return next_timestamp; - } - -void FlowSrc::Process() - { - if ( ! data && ! ExtractNextPDU() ) - return; - - // This is normally done by calling net_packet_dispatch(), - // but as we don't have a packet to dispatch ... - net_update_time(next_timestamp); - expire_timers(); - - netflow_analyzer->downflow()->set_exporter_ip(exporter_ip); - - // We handle exceptions in NewData (might have changed w/ new binpac). 
- netflow_analyzer->NewData(0, data, data + pdu_len); - data = 0; - } - -void FlowSrc::Close() - { - safe_close(selectable_fd); - } - - -FlowSocketSrc::~FlowSocketSrc() - { - } - -int FlowSocketSrc::ExtractNextPDU() - { - sockaddr_in from; - socklen_t fromlen = sizeof(from); - pdu_len = recvfrom(selectable_fd, buffer, NF_MAX_PKT_SIZE, 0, - (struct sockaddr*) &from, &fromlen); - if ( pdu_len < 0 ) - { - reporter->Error("problem reading NetFlow data from socket"); - data = 0; - next_timestamp = -1.0; - closed = 1; - return 0; - } - - if ( fromlen != sizeof(from) ) - { - reporter->Error("malformed NetFlow PDU"); - return 0; - } - - data = buffer; - exporter_ip = from.sin_addr.s_addr; - next_timestamp = current_time(); - - if ( next_timestamp < current_timestamp ) - next_timestamp = current_timestamp; - else - current_timestamp = next_timestamp; - - return 1; - } - -FlowSocketSrc::FlowSocketSrc(const char* listen_parms) - { - int n = strlen(listen_parms) + 1; - - char laddr[n], port[n], ident[n]; - laddr[0] = port[0] = ident[0] = '\0'; - - int ret = sscanf(listen_parms, "%[^:]:%[^=]=%s", laddr, port, ident); - if ( ret < 2 ) - { - snprintf(errbuf, BRO_FLOW_ERRBUF_SIZE, - "parsing your listen-spec went nuts: laddr='%s', port='%s'\n", - laddr[0] ? laddr : "", port[0] ? port : ""); - closed = 1; - return; - } - - const char* id = (ret == 3) ? 
ident : listen_parms; - netflow_analyzer->downflow()->set_identifier(id); - - struct addrinfo aiprefs = { - 0, PF_INET, SOCK_DGRAM, IPPROTO_UDP, 0, NULL, NULL, NULL - }; - struct addrinfo* ainfo = 0; - if ( (ret = getaddrinfo(laddr, port, &aiprefs, &ainfo)) != 0 ) - { - snprintf(errbuf, BRO_FLOW_ERRBUF_SIZE, - "getaddrinfo(%s, %s, ...): %s", - laddr, port, gai_strerror(ret)); - closed = 1; - return; - } - - if ( (selectable_fd = socket (PF_INET, SOCK_DGRAM, 0)) < 0 ) - { - snprintf(errbuf, BRO_FLOW_ERRBUF_SIZE, - "socket: %s", strerror(errno)); - closed = 1; - goto cleanup; - } - - if ( bind (selectable_fd, ainfo->ai_addr, ainfo->ai_addrlen) < 0 ) - { - snprintf(errbuf, BRO_FLOW_ERRBUF_SIZE, - "bind: %s", strerror(errno)); - closed = 1; - goto cleanup; - } - -cleanup: - freeaddrinfo(ainfo); - } - - -FlowFileSrc::~FlowFileSrc() - { - delete [] readfile; - } - -int FlowFileSrc::ExtractNextPDU() - { - FlowFileSrcPDUHeader pdu_header; - - if ( read(selectable_fd, &pdu_header, sizeof(pdu_header)) < - int(sizeof(pdu_header)) ) - return Error(errno, "read header"); - - if ( pdu_header.pdu_length > NF_MAX_PKT_SIZE ) - { - reporter->Error("NetFlow packet too long"); - - // Safely skip over the too-long PDU. - if ( lseek(selectable_fd, pdu_header.pdu_length, SEEK_CUR) < 0 ) - return Error(errno, "lseek"); - return 0; - } - - if ( read(selectable_fd, buffer, pdu_header.pdu_length) < - pdu_header.pdu_length ) - return Error(errno, "read data"); - - if ( next_timestamp < pdu_header.network_time ) - { - next_timestamp = pdu_header.network_time; - current_timestamp = pdu_header.network_time; - } - else - current_timestamp = next_timestamp; - - data = buffer; - pdu_len = pdu_header.pdu_length; - exporter_ip = pdu_header.ipaddr; - - return 1; - } - -FlowFileSrc::FlowFileSrc(const char* readfile) - { - int n = strlen(readfile) + 1; - char ident[n]; - this->readfile = new char[n]; - - int ret = sscanf(readfile, "%[^=]=%s", this->readfile, ident); - const char* id = (ret == 2) ? 
ident : this->readfile; - netflow_analyzer->downflow()->set_identifier(id); - - selectable_fd = open(this->readfile, O_RDONLY); - if ( selectable_fd < 0 ) - { - closed = 1; - snprintf(errbuf, BRO_FLOW_ERRBUF_SIZE, - "open: %s", strerror(errno)); - } - } - -int FlowFileSrc::Error(int errlvl, const char* errmsg) - { - snprintf(errbuf, BRO_FLOW_ERRBUF_SIZE, - "%s: %s", errmsg, strerror(errlvl)); - data = 0; - next_timestamp = -1.0; - closed = 1; - return 0; - } diff --git a/src/FlowSrc.h b/src/FlowSrc.h deleted file mode 100644 index 03dda2761d..0000000000 --- a/src/FlowSrc.h +++ /dev/null @@ -1,84 +0,0 @@ -// See the file "COPYING" in the main distribution directory for copyright. -// -// Written by Bernhard Ager, TU Berlin (2006/2007). - -#ifndef flowsrc_h -#define flowsrc_h - -#include "IOSource.h" -#include "NetVar.h" -#include "binpac.h" - -#define BRO_FLOW_ERRBUF_SIZE 512 - -// TODO: 1500 is enough for v5 - how about the others? -// 65536 would be enough for any UDP packet. -#define NF_MAX_PKT_SIZE 8192 - -struct FlowFileSrcPDUHeader { - double network_time; - int pdu_length; - uint32 ipaddr; -}; - -// Avoid including netflow_pac.h by explicitly declaring the NetFlow_Analyzer. 
-namespace binpac { - namespace NetFlow { - class NetFlow_Analyzer; - } -} - -class FlowSrc : public IOSource { -public: - virtual ~FlowSrc(); - - // IOSource interface: - bool IsReady(); - void GetFds(int* read, int* write, int* except); - double NextTimestamp(double* network_time); - void Process(); - - const char* Tag() { return "FlowSrc"; } - const char* ErrorMsg() const { return errbuf; } - -protected: - FlowSrc(); - - virtual int ExtractNextPDU() = 0; - virtual void Close(); - - int selectable_fd; - - double current_timestamp; - double next_timestamp; - binpac::NetFlow::NetFlow_Analyzer* netflow_analyzer; - - u_char buffer[NF_MAX_PKT_SIZE]; - u_char* data; - int pdu_len; - uint32 exporter_ip; // in network byte order - - char errbuf[BRO_FLOW_ERRBUF_SIZE]; -}; - -class FlowSocketSrc : public FlowSrc { -public: - FlowSocketSrc(const char* listen_parms); - virtual ~FlowSocketSrc(); - - int ExtractNextPDU(); -}; - -class FlowFileSrc : public FlowSrc { -public: - FlowFileSrc(const char* readfile); - ~FlowFileSrc(); - - int ExtractNextPDU(); - -protected: - int Error(int errlvl, const char* errmsg); - char* readfile; -}; - -#endif diff --git a/src/Func.cc b/src/Func.cc index 41eac7d1df..d66e9c71fa 100644 --- a/src/Func.cc +++ b/src/Func.cc @@ -606,14 +606,10 @@ void builtin_error(const char* msg, BroObj* arg) } #include "bro.bif.func_h" -#include "logging.bif.func_h" -#include "input.bif.func_h" #include "reporter.bif.func_h" #include "strings.bif.func_h" #include "bro.bif.func_def" -#include "logging.bif.func_def" -#include "input.bif.func_def" #include "reporter.bif.func_def" #include "strings.bif.func_def" @@ -629,8 +625,6 @@ void init_builtin_funcs() gap_info = internal_type("gap_info")->AsRecordType(); #include "bro.bif.func_init" -#include "logging.bif.func_init" -#include "input.bif.func_init" #include "reporter.bif.func_init" #include "strings.bif.func_init" diff --git a/src/IOSource.cc b/src/IOSource.cc deleted file mode 100644 index d47007caad..0000000000 
--- a/src/IOSource.cc +++ /dev/null @@ -1,176 +0,0 @@ -#include -#include -#include -#include - -#include - -#include "util.h" -#include "IOSource.h" - -IOSourceRegistry io_sources; - -IOSourceRegistry::~IOSourceRegistry() - { - for ( SourceList::iterator i = sources.begin(); i != sources.end(); ++i ) - delete *i; - - sources.clear(); - } - -void IOSourceRegistry::RemoveAll() - { - // We're cheating a bit here ... - dont_counts = sources.size(); - } - -IOSource* IOSourceRegistry::FindSoonest(double* ts) - { - // Remove sources which have gone dry. For simplicity, we only - // remove at most one each time. - for ( SourceList::iterator i = sources.begin(); - i != sources.end(); ++i ) - if ( ! (*i)->src->IsOpen() ) - { - delete *i; - sources.erase(i); - break; - } - - // Ideally, we would always call select on the fds to see which - // are ready, and return the soonest. Unfortunately, that'd mean - // one select-call per packet, which we can't afford in high-volume - // environments. Thus, we call select only every SELECT_FREQUENCY - // call (or if all sources report that they are dry). - - ++call_count; - - IOSource* soonest_src = 0; - double soonest_ts = 1e20; - double soonest_local_network_time = 1e20; - bool all_idle = true; - - // Find soonest source of those which tell us they have something to - // process. - for ( SourceList::iterator i = sources.begin(); i != sources.end(); ++i ) - { - if ( ! (*i)->src->IsIdle() ) - { - all_idle = false; - double local_network_time = 0; - double ts = (*i)->src->NextTimestamp(&local_network_time); - if ( ts > 0 && ts < soonest_ts ) - { - soonest_ts = ts; - soonest_src = (*i)->src; - soonest_local_network_time = - local_network_time ? - local_network_time : ts; - } - } - } - - // If we found one and aren't going to select this time, - // return it. - int maxx = 0; - - if ( soonest_src && (call_count % SELECT_FREQUENCY) != 0 ) - goto finished; - - // Select on the join of all file descriptors. 
- fd_set fd_read, fd_write, fd_except; - - FD_ZERO(&fd_read); - FD_ZERO(&fd_write); - FD_ZERO(&fd_except); - - for ( SourceList::iterator i = sources.begin(); - i != sources.end(); ++i ) - { - Source* src = (*i); - - if ( ! src->src->IsIdle() ) - // No need to select on sources which we know to - // be ready. - continue; - - src->fd_read = src->fd_write = src->fd_except = 0; - src->src->GetFds(&src->fd_read, &src->fd_write, &src->fd_except); - - FD_SET(src->fd_read, &fd_read); - FD_SET(src->fd_write, &fd_write); - FD_SET(src->fd_except, &fd_except); - - maxx = max(src->fd_read, maxx); - maxx = max(src->fd_write, maxx); - maxx = max(src->fd_except, maxx); - } - - // We can't block indefinitely even when all sources are dry: - // we're doing some IOSource-independent stuff in the main loop, - // so we need to return from time to time. (Instead of no time-out - // at all, we use a very small one. This lets FreeBSD trigger a - // BPF buffer switch on the next read when the hold buffer is empty - // while the store buffer isn't filled yet. - - struct timeval timeout; - - if ( all_idle ) - { - // Interesting: when all sources are dry, simply sleeping a - // bit *without* watching for any fd becoming ready may - // decrease CPU load. I guess that's because it allows - // the kernel's packet buffers to fill. - Robin - timeout.tv_sec = 0; - timeout.tv_usec = 20; // SELECT_TIMEOUT; - select(0, 0, 0, 0, &timeout); - } - - if ( ! maxx ) - // No selectable fd at all. - goto finished; - - timeout.tv_sec = 0; - timeout.tv_usec = 0; - - if ( select(maxx + 1, &fd_read, &fd_write, &fd_except, &timeout) > 0 ) - { // Find soonest. - for ( SourceList::iterator i = sources.begin(); - i != sources.end(); ++i ) - { - Source* src = (*i); - - if ( ! 
src->src->IsIdle() ) - continue; - - if ( FD_ISSET(src->fd_read, &fd_read) || - FD_ISSET(src->fd_write, &fd_write) || - FD_ISSET(src->fd_except, &fd_except) ) - { - double local_network_time = 0; - double ts = src->src->NextTimestamp(&local_network_time); - if ( ts > 0.0 && ts < soonest_ts ) - { - soonest_ts = ts; - soonest_src = src->src; - soonest_local_network_time = - local_network_time ? - local_network_time : ts; - } - } - } - } - -finished: - *ts = soonest_local_network_time; - return soonest_src; - } - -void IOSourceRegistry::Register(IOSource* src, bool dont_count) - { - Source* s = new Source; - s->src = src; - if ( dont_count ) - ++dont_counts; - return sources.push_back(s); - } diff --git a/src/IOSource.h b/src/IOSource.h deleted file mode 100644 index db50bbd2a9..0000000000 --- a/src/IOSource.h +++ /dev/null @@ -1,103 +0,0 @@ -// Interface for classes providing/consuming data during Bro's main loop. - -#ifndef iosource_h -#define iosource_h - -#include -#include "Timer.h" - -using namespace std; - -class IOSource { -public: - IOSource() { idle = closed = false; } - virtual ~IOSource() {} - - // Returns true if source has nothing ready to process. - bool IsIdle() const { return idle; } - - // Returns true if more data is to be expected in the future. - // Otherwise, source may be removed. - bool IsOpen() const { return ! closed; } - - // Returns select'able fds (leaves args untouched if we don't have - // selectable fds). - virtual void GetFds(int* read, int* write, int* except) = 0; - - // The following two methods are only called when either IsIdle() - // returns false or select() on one of the fds indicates that there's - // data to process. - - // Returns timestamp (in global network time) associated with next - // data item. If the source wants the data item to be processed - // with a local network time, it sets the argument accordingly. - virtual double NextTimestamp(double* network_time) = 0; - - // Processes and consumes next data item. 
- virtual void Process() = 0; - - // Returns tag of timer manager associated with last processed - // data item, nil for global timer manager. - virtual TimerMgr::Tag* GetCurrentTag() { return 0; } - - // Returns a descriptual tag for debugging. - virtual const char* Tag() = 0; - -protected: - // Derived classed are to set this to true if they have gone dry - // temporarily. - bool idle; - - // Derived classed are to set this to true if they have gone dry - // permanently. - bool closed; -}; - -class IOSourceRegistry { -public: - IOSourceRegistry() { call_count = 0; dont_counts = 0; } - ~IOSourceRegistry(); - - // If dont_count is true, this source does not contribute to the - // number of IOSources returned by Size(). The effect is that - // if all sources but the non-counting ones have gone dry, - // processing will shut down. - void Register(IOSource* src, bool dont_count = false); - - // This may block for some time. - IOSource* FindSoonest(double* ts); - - int Size() const { return sources.size() - dont_counts; } - - // Terminate IOSource processing immediately by removing all - // sources (and therefore returning a Size() of zero). - void Terminate() { RemoveAll(); } - -protected: - // When looking for a source with something to process, - // every SELECT_FREQUENCY calls we will go ahead and - // block on a select(). - static const int SELECT_FREQUENCY = 25; - - // Microseconds to wait in an empty select if no source is ready. 
- static const int SELECT_TIMEOUT = 50; - - void RemoveAll(); - - unsigned int call_count; - int dont_counts; - - struct Source { - IOSource* src; - int fd_read; - int fd_write; - int fd_except; - }; - - typedef list SourceList; - SourceList sources; -}; - -extern IOSourceRegistry io_sources; - -#endif diff --git a/src/Net.cc b/src/Net.cc index 6a5c65c537..adac9c02fd 100644 --- a/src/Net.cc +++ b/src/Net.cc @@ -29,6 +29,9 @@ #include "Anon.h" #include "Serializer.h" #include "PacketDumper.h" +#include "iosource/Manager.h" +#include "iosource/PktSrc.h" +#include "iosource/PktDumper.h" #include "plugin/Manager.h" extern "C" { @@ -39,10 +42,7 @@ extern "C" { extern int select(int, fd_set *, fd_set *, fd_set *, struct timeval *); } -PList(PktSrc) pkt_srcs; - -// FIXME: We should really merge PktDumper and PacketDumper. -PktDumper* pkt_dumper = 0; +iosource::PktDumper* pkt_dumper = 0; int reading_live = 0; int reading_traces = 0; @@ -63,8 +63,8 @@ const u_char* current_pkt = 0; int current_dispatched = 0; int current_hdr_size = 0; double current_timestamp = 0.0; -PktSrc* current_pktsrc = 0; -IOSource* current_iosrc; +iosource::PktSrc* current_pktsrc = 0; +iosource::IOSource* current_iosrc = 0; std::list files_scanned; std::vector sig_files; @@ -113,17 +113,21 @@ RETSIGTYPE watchdog(int /* signo */) // saving the packet which caused the // watchdog to trigger may be helpful, // so we'll save that one nevertheless. - pkt_dumper = new PktDumper("watchdog-pkt.pcap"); - if ( pkt_dumper->IsError() ) + pkt_dumper = iosource_mgr->OpenPktDumper("watchdog-pkt.pcap", false); + if ( ! 
pkt_dumper || pkt_dumper->IsError() ) { - reporter->Error("watchdog: can't open watchdog-pkt.pcap for writing\n"); - delete pkt_dumper; + reporter->Error("watchdog: can't open watchdog-pkt.pcap for writing"); pkt_dumper = 0; } } if ( pkt_dumper ) - pkt_dumper->Dump(current_hdr, current_pkt); + { + iosource::PktDumper::Packet p; + p.hdr = current_hdr; + p.data = current_pkt; + pkt_dumper->Dump(&p); + } } net_get_final_stats(); @@ -149,118 +153,40 @@ void net_update_time(double new_network_time) } void net_init(name_list& interfaces, name_list& readfiles, - name_list& netflows, name_list& flowfiles, - const char* writefile, const char* filter, - const char* secondary_filter, int do_watchdog) + const char* writefile, int do_watchdog) { - if ( readfiles.length() > 0 || flowfiles.length() > 0 ) + if ( readfiles.length() > 0 ) { reading_live = pseudo_realtime > 0.0; reading_traces = 1; for ( int i = 0; i < readfiles.length(); ++i ) { - PktFileSrc* ps = new PktFileSrc(readfiles[i], filter); + iosource::PktSrc* ps = iosource_mgr->OpenPktSrc(readfiles[i], false); + assert(ps); if ( ! ps->IsOpen() ) - reporter->FatalError("%s: problem with trace file %s - %s\n", - prog, readfiles[i], ps->ErrorMsg()); - else - { - pkt_srcs.append(ps); - io_sources.Register(ps); - } - - if ( secondary_filter ) - { - // We use a second PktFileSrc for the - // secondary path. - PktFileSrc* ps = new PktFileSrc(readfiles[i], - secondary_filter, - TYPE_FILTER_SECONDARY); - - if ( ! ps->IsOpen() ) - reporter->FatalError("%s: problem with trace file %s - %s\n", - prog, readfiles[i], - ps->ErrorMsg()); - else - { - pkt_srcs.append(ps); - io_sources.Register(ps); - } - - ps->AddSecondaryTablePrograms(); - } - } - - for ( int i = 0; i < flowfiles.length(); ++i ) - { - FlowFileSrc* fs = new FlowFileSrc(flowfiles[i]); - - if ( ! 
fs->IsOpen() ) - reporter->FatalError("%s: problem with netflow file %s - %s\n", - prog, flowfiles[i], fs->ErrorMsg()); - else - { - io_sources.Register(fs); - } + reporter->FatalError("problem with trace file %s (%s)", + readfiles[i], + ps->ErrorMsg()); } } - else if ((interfaces.length() > 0 || netflows.length() > 0)) + else if ( interfaces.length() > 0 ) { reading_live = 1; reading_traces = 0; for ( int i = 0; i < interfaces.length(); ++i ) { - PktSrc* ps; - ps = new PktInterfaceSrc(interfaces[i], filter); + iosource::PktSrc* ps = iosource_mgr->OpenPktSrc(interfaces[i], true); + assert(ps); if ( ! ps->IsOpen() ) - reporter->FatalError("%s: problem with interface %s - %s\n", - prog, interfaces[i], ps->ErrorMsg()); - else - { - pkt_srcs.append(ps); - io_sources.Register(ps); - } - - if ( secondary_filter ) - { - PktSrc* ps; - ps = new PktInterfaceSrc(interfaces[i], - filter, TYPE_FILTER_SECONDARY); - - if ( ! ps->IsOpen() ) - reporter->Error("%s: problem with interface %s - %s\n", - prog, interfaces[i], - ps->ErrorMsg()); - else - { - pkt_srcs.append(ps); - io_sources.Register(ps); - } - - ps->AddSecondaryTablePrograms(); - } + reporter->FatalError("problem with interface %s (%s)", + interfaces[i], + ps->ErrorMsg()); } - - for ( int i = 0; i < netflows.length(); ++i ) - { - FlowSocketSrc* fs = new FlowSocketSrc(netflows[i]); - - if ( ! fs->IsOpen() ) - { - reporter->Error("%s: problem with netflow socket %s - %s\n", - prog, netflows[i], fs->ErrorMsg()); - delete fs; - } - - else - io_sources.Register(fs); - } - } else @@ -272,12 +198,12 @@ void net_init(name_list& interfaces, name_list& readfiles, if ( writefile ) { - // ### This will fail horribly if there are multiple - // interfaces with different-lengthed media. 
- pkt_dumper = new PktDumper(writefile); - if ( pkt_dumper->IsError() ) - reporter->FatalError("%s: can't open write file \"%s\" - %s\n", - prog, writefile, pkt_dumper->ErrorMsg()); + pkt_dumper = iosource_mgr->OpenPktDumper(writefile, false); + assert(pkt_dumper); + + if ( ! pkt_dumper->IsOpen() ) + reporter->FatalError("problem opening dump file %s (%s)", + writefile, pkt_dumper->ErrorMsg()); ID* id = global_scope()->Lookup("trace_output_file"); if ( ! id ) @@ -298,7 +224,7 @@ void net_init(name_list& interfaces, name_list& readfiles, } } -void expire_timers(PktSrc* src_ps) +void expire_timers(iosource::PktSrc* src_ps) { SegmentProfiler(segment_logger, "expiring-timers"); TimerMgr* tmgr = @@ -311,8 +237,8 @@ void expire_timers(PktSrc* src_ps) } void net_packet_dispatch(double t, const struct pcap_pkthdr* hdr, - const u_char* pkt, int hdr_size, - PktSrc* src_ps) + const u_char* pkt, int hdr_size, + iosource::PktSrc* src_ps) { if ( ! bro_start_network_time ) bro_start_network_time = t; @@ -368,11 +294,11 @@ void net_run() { set_processing_status("RUNNING", "net_run"); - while ( io_sources.Size() || + while ( iosource_mgr->Size() || (BifConst::exit_only_after_terminate && ! 
terminating) ) { double ts; - IOSource* src = io_sources.FindSoonest(&ts); + iosource::IOSource* src = iosource_mgr->FindSoonest(&ts); #ifdef DEBUG static int loop_counter = 0; @@ -470,16 +396,19 @@ void net_run() void net_get_final_stats() { - loop_over_list(pkt_srcs, i) + const iosource::Manager::PktSrcList& pkt_srcs(iosource_mgr->GetPktSrcs()); + + for ( iosource::Manager::PktSrcList::const_iterator i = pkt_srcs.begin(); + i != pkt_srcs.end(); i++ ) { - PktSrc* ps = pkt_srcs[i]; + iosource::PktSrc* ps = *i; if ( ps->IsLive() ) { - struct PktSrc::Stats s; + iosource::PktSrc::Stats s; ps->Statistics(&s); - reporter->Info("%d packets received on interface %s, %d dropped\n", - s.received, ps->Interface(), s.dropped); + reporter->Info("%d packets received on interface %s, %d dropped", + s.received, ps->Path().c_str(), s.dropped); } } } @@ -499,8 +428,6 @@ void net_finish(int drain_events) sessions->Done(); } - delete pkt_dumper; - #ifdef DEBUG extern int reassem_seen_bytes, reassem_copied_bytes; // DEBUG_MSG("Reassembly (TCP and IP/Frag): %d bytes seen, %d bytes copied\n", @@ -521,29 +448,6 @@ void net_delete() delete ip_anonymizer[i]; } -// net_packet_match -// -// Description: -// - Checks if a packet matches a filter. It just wraps up a call to -// [pcap.h's] bpf_filter(). -// -// Inputs: -// - fp: a BPF-compiled filter -// - pkt: a pointer to the packet -// - len: the original packet length -// - caplen: the captured packet length. This is pkt length -// -// Output: -// - return: 1 if the packet matches the filter, 0 otherwise - -int net_packet_match(BPF_Program* fp, const u_char* pkt, - u_int len, u_int caplen) - { - // NOTE: I don't like too much un-const'ing the pkt variable. 
- return bpf_filter(fp->GetProgram()->bf_insns, (u_char*) pkt, len, caplen); - } - - int _processing_suspended = 0; static double suspend_start = 0; @@ -561,8 +465,12 @@ void net_continue_processing() if ( _processing_suspended == 1 ) { reporter->Info("processing continued"); - loop_over_list(pkt_srcs, i) - pkt_srcs[i]->ContinueAfterSuspend(); + + const iosource::Manager::PktSrcList& pkt_srcs(iosource_mgr->GetPktSrcs()); + + for ( iosource::Manager::PktSrcList::const_iterator i = pkt_srcs.begin(); + i != pkt_srcs.end(); i++ ) + (*i)->ContinueAfterSuspend(); } --_processing_suspended; diff --git a/src/Net.h b/src/Net.h index 5fa7210efb..2e466f8c7f 100644 --- a/src/Net.h +++ b/src/Net.h @@ -5,17 +5,15 @@ #include "net_util.h" #include "util.h" -#include "BPF_Program.h" #include "List.h" -#include "PktSrc.h" -#include "FlowSrc.h" #include "Func.h" #include "RemoteSerializer.h" +#include "iosource/IOSource.h" +#include "iosource/PktSrc.h" +#include "iosource/PktDumper.h" extern void net_init(name_list& interfaces, name_list& readfiles, - name_list& netflows, name_list& flowfiles, - const char* writefile, const char* filter, - const char* secondary_filter, int do_watchdog); + const char* writefile, int do_watchdog); extern void net_run(); extern void net_get_final_stats(); extern void net_finish(int drain_events); @@ -23,10 +21,8 @@ extern void net_delete(); // Reclaim all memory, etc. 
extern void net_update_time(double new_network_time); extern void net_packet_dispatch(double t, const struct pcap_pkthdr* hdr, const u_char* pkt, int hdr_size, - PktSrc* src_ps); -extern int net_packet_match(BPF_Program* fp, const u_char* pkt, - u_int len, u_int caplen); -extern void expire_timers(PktSrc* src_ps = 0); + iosource::PktSrc* src_ps); +extern void expire_timers(iosource::PktSrc* src_ps = 0); extern void termination_signal(); // Functions to temporarily suspend processing of live input (network packets @@ -83,13 +79,10 @@ extern const u_char* current_pkt; extern int current_dispatched; extern int current_hdr_size; extern double current_timestamp; -extern PktSrc* current_pktsrc; -extern IOSource* current_iosrc; +extern iosource::PktSrc* current_pktsrc; +extern iosource::IOSource* current_iosrc; -declare(PList,PktSrc); -extern PList(PktSrc) pkt_srcs; - -extern PktDumper* pkt_dumper; // where to save packets +extern iosource::PktDumper* pkt_dumper; // where to save packets extern char* writefile; diff --git a/src/NetVar.cc b/src/NetVar.cc index 0a11a754bb..7c66b55bc2 100644 --- a/src/NetVar.cc +++ b/src/NetVar.cc @@ -245,8 +245,6 @@ bro_uint_t bits_per_uid; #include "const.bif.netvar_def" #include "types.bif.netvar_def" #include "event.bif.netvar_def" -#include "logging.bif.netvar_def" -#include "input.bif.netvar_def" #include "reporter.bif.netvar_def" void init_event_handlers() @@ -311,8 +309,6 @@ void init_net_var() { #include "const.bif.netvar_init" #include "types.bif.netvar_init" -#include "logging.bif.netvar_init" -#include "input.bif.netvar_init" #include "reporter.bif.netvar_init" conn_id = internal_type("conn_id")->AsRecordType(); diff --git a/src/NetVar.h b/src/NetVar.h index c726c793b2..edd70d1ea6 100644 --- a/src/NetVar.h +++ b/src/NetVar.h @@ -255,8 +255,6 @@ extern void init_net_var(); #include "const.bif.netvar_h" #include "types.bif.netvar_h" #include "event.bif.netvar_h" -#include "logging.bif.netvar_h" -#include "input.bif.netvar_h" 
#include "reporter.bif.netvar_h" #endif diff --git a/src/Pipe.cc b/src/Pipe.cc new file mode 100644 index 0000000000..3f60409fdb --- /dev/null +++ b/src/Pipe.cc @@ -0,0 +1,83 @@ +// See the file "COPYING" in the main distribution directory for copyright. + +#include "Pipe.h" +#include "Reporter.h" +#include +#include +#include +#include + +using namespace bro; + +static void pipe_fail(int eno) + { + char tmp[256]; + strerror_r(eno, tmp, sizeof(tmp)); + reporter->FatalError("Pipe failure: %s", tmp); + } + +static void set_flags(int fd, int flags) + { + if ( flags ) + fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | flags); + } + +static void set_status_flags(int fd, int flags) + { + if ( flags ) + fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | flags); + } + +static int dup_or_fail(int fd, int flags) + { + int rval = dup(fd); + + if ( rval < 0 ) + pipe_fail(errno); + + set_flags(fd, flags); + return rval; + } + +Pipe::Pipe(int flags0, int flags1, int status_flags0, int status_flags1) + { + // pipe2 can set flags atomically, but not yet available everywhere. 
+ if ( ::pipe(fds) ) + pipe_fail(errno); + + flags[0] = flags0; + flags[1] = flags1; + + set_flags(fds[0], flags[0]); + set_flags(fds[1], flags[1]); + set_status_flags(fds[0], status_flags0); + set_status_flags(fds[1], status_flags1); + } + +Pipe::~Pipe() + { + close(fds[0]); + close(fds[1]); + } + +Pipe::Pipe(const Pipe& other) + { + fds[0] = dup_or_fail(other.fds[0], other.flags[0]); + fds[1] = dup_or_fail(other.fds[1], other.flags[1]); + flags[0] = other.flags[0]; + flags[1] = other.flags[1]; + } + +Pipe& Pipe::operator=(const Pipe& other) + { + if ( this == &other ) + return *this; + + close(fds[0]); + close(fds[1]); + fds[0] = dup_or_fail(other.fds[0], other.flags[0]); + fds[1] = dup_or_fail(other.fds[1], other.flags[1]); + flags[0] = other.flags[0]; + flags[1] = other.flags[1]; + return *this; + } diff --git a/src/Pipe.h b/src/Pipe.h new file mode 100644 index 0000000000..77b341117e --- /dev/null +++ b/src/Pipe.h @@ -0,0 +1,56 @@ +// See the file "COPYING" in the main distribution directory for copyright. + +#ifndef BRO_PIPE_H +#define BRO_PIPE_H + +namespace bro { + +class Pipe { +public: + + /** + * Create a pair of file descriptors via pipe(), or aborts if it cannot. + * @param flags0 file descriptor flags to set on read end of pipe. + * @param flags1 file descriptor flags to set on write end of pipe. + * @param status_flags0 descriptor status flags to set on read end of pipe. + * @param status_flags1 descriptor status flags to set on write end of pipe. + */ + Pipe(int flags0 = 0, int flags1 = 0, int status_flags0 = 0, + int status_flags1 = 0); + + /** + * Close the pair of file descriptors owned by the object. + */ + ~Pipe(); + + /** + * Make a copy of another Pipe object (file descriptors are dup'd). + */ + Pipe(const Pipe& other); + + /** + * Assign a Pipe object by closing file descriptors and duping those of + * the other. + */ + Pipe& operator=(const Pipe& other); + + /** + * @return the file descriptor associated with the read-end of the pipe. 
+ */ + int ReadFD() const + { return fds[0]; } + + /** + * @return the file descriptor associated with the write-end of the pipe. + */ + int WriteFD() const + { return fds[1]; } + +private: + int fds[2]; + int flags[2]; +}; + +} // namespace bro + +#endif // BRO_PIPE_H diff --git a/src/PktSrc.cc b/src/PktSrc.cc deleted file mode 100644 index b5ac3a5d69..0000000000 --- a/src/PktSrc.cc +++ /dev/null @@ -1,804 +0,0 @@ -// See the file "COPYING" in the main distribution directory for copyright. - -#include -#include - -#include "config.h" - -#include "util.h" -#include "PktSrc.h" -#include "Hash.h" -#include "Net.h" -#include "Sessions.h" - - -// ### This needs auto-confing. -#ifdef HAVE_PCAP_INT_H -#include -#endif - -PktSrc::PktSrc() - { - interface = readfile = 0; - data = last_data = 0; - memset(&hdr, 0, sizeof(hdr)); - hdr_size = 0; - datalink = 0; - netmask = 0xffffff00; - pd = 0; - idle = false; - - next_sync_point = 0; - first_timestamp = current_timestamp = next_timestamp = 0.0; - first_wallclock = current_wallclock = 0; - - stats.received = stats.dropped = stats.link = 0; - } - -PktSrc::~PktSrc() - { - Close(); - - loop_over_list(program_list, i) - delete program_list[i]; - - BPF_Program* code; - IterCookie* cookie = filters.InitForIteration(); - while ( (code = filters.NextEntry(cookie)) ) - delete code; - - delete [] interface; - delete [] readfile; - } - -void PktSrc::GetFds(int* read, int* write, int* except) - { - if ( pseudo_realtime ) - { - // Select would give erroneous results. But we simulate it - // by setting idle accordingly. - idle = CheckPseudoTime() == 0; - return; - } - - if ( selectable_fd >= 0 ) - *read = selectable_fd; - } - -int PktSrc::ExtractNextPacket() - { - // Don't return any packets if processing is suspended (except for the - // very first packet which we need to set up times). 
- if ( net_is_processing_suspended() && first_timestamp ) - { - idle = true; - return 0; - } - - data = last_data = pcap_next(pd, &hdr); - - if ( data && (hdr.len == 0 || hdr.caplen == 0) ) - { - sessions->Weird("empty_pcap_header", &hdr, data); - return 0; - } - - if ( data ) - next_timestamp = hdr.ts.tv_sec + double(hdr.ts.tv_usec) / 1e6; - - if ( pseudo_realtime ) - current_wallclock = current_time(true); - - if ( ! first_timestamp ) - first_timestamp = next_timestamp; - - idle = (data == 0); - - if ( data ) - ++stats.received; - - // Source has gone dry. If it's a network interface, this just means - // it's timed out. If it's a file, though, then the file has been - // exhausted. - if ( ! data && ! IsLive() ) - { - closed = true; - - if ( pseudo_realtime && using_communication ) - { - if ( remote_trace_sync_interval ) - remote_serializer->SendFinalSyncPoint(); - else - remote_serializer->Terminate(); - } - } - - return data != 0; - } - -double PktSrc::NextTimestamp(double* local_network_time) - { - if ( ! data && ! ExtractNextPacket() ) - return -1.0; - - if ( pseudo_realtime ) - { - // Delay packet if necessary. - double packet_time = CheckPseudoTime(); - if ( packet_time ) - return packet_time; - - idle = true; - return -1.0; - } - - return next_timestamp; - } - -void PktSrc::ContinueAfterSuspend() - { - current_wallclock = current_time(true); - } - -double PktSrc::CurrentPacketWallClock() - { - // We stop time when we are suspended. - if ( net_is_processing_suspended() ) - current_wallclock = current_time(true); - - return current_wallclock; - } - -double PktSrc::CheckPseudoTime() - { - if ( ! data && ! ExtractNextPacket() ) - return 0; - - if ( ! 
current_timestamp ) - return bro_start_time; - - if ( remote_trace_sync_interval ) - { - if ( next_sync_point == 0 || next_timestamp >= next_sync_point ) - { - int n = remote_serializer->SendSyncPoint(); - next_sync_point = first_timestamp + - n * remote_trace_sync_interval; - remote_serializer->Log(RemoteSerializer::LogInfo, - fmt("stopping at packet %.6f, next sync-point at %.6f", - current_timestamp, next_sync_point)); - - return 0; - } - } - - double pseudo_time = next_timestamp - first_timestamp; - double ct = (current_time(true) - first_wallclock) * pseudo_realtime; - - return pseudo_time <= ct ? bro_start_time + pseudo_time : 0; - } - -void PktSrc::Process() - { - if ( ! data && ! ExtractNextPacket() ) - return; - - current_timestamp = next_timestamp; - - int pkt_hdr_size = hdr_size; - - // Unfortunately some packets on the link might have MPLS labels - // while others don't. That means we need to ask the link-layer if - // labels are in place. - bool have_mpls = false; - - int protocol = 0; - - switch ( datalink ) { - case DLT_NULL: - { - protocol = (data[3] << 24) + (data[2] << 16) + (data[1] << 8) + data[0]; - - // From the Wireshark Wiki: "AF_INET6, unfortunately, has - // different values in {NetBSD,OpenBSD,BSD/OS}, - // {FreeBSD,DragonFlyBSD}, and {Darwin/Mac OS X}, so an IPv6 - // packet might have a link-layer header with 24, 28, or 30 - // as the AF_ value." As we may be reading traces captured on - // platforms other than what we're running on, we accept them - // all here. - if ( protocol != AF_INET - && protocol != AF_INET6 - && protocol != 24 - && protocol != 28 - && protocol != 30 ) - { - sessions->Weird("non_ip_packet_in_null_transport", &hdr, data); - data = 0; - return; - } - - break; - } - - case DLT_EN10MB: - { - // Get protocol being carried from the ethernet frame. - protocol = (data[12] << 8) + data[13]; - - switch ( protocol ) - { - // MPLS carried over the ethernet frame. 
- case 0x8847: - // Remove the data link layer and denote a - // header size of zero before the IP header. - have_mpls = true; - data += get_link_header_size(datalink); - pkt_hdr_size = 0; - break; - - // VLAN carried over the ethernet frame. - case 0x8100: - data += get_link_header_size(datalink); - - // Check for MPLS in VLAN. - if ( ((data[2] << 8) + data[3]) == 0x8847 ) - have_mpls = true; - - data += 4; // Skip the vlan header - pkt_hdr_size = 0; - - // Check for 802.1ah (Q-in-Q) containing IP. - // Only do a second layer of vlan tag - // stripping because there is no - // specification that allows for deeper - // nesting. - if ( ((data[2] << 8) + data[3]) == 0x0800 ) - data += 4; - - break; - - // PPPoE carried over the ethernet frame. - case 0x8864: - data += get_link_header_size(datalink); - protocol = (data[6] << 8) + data[7]; - data += 8; // Skip the PPPoE session and PPP header - pkt_hdr_size = 0; - - if ( protocol != 0x0021 && protocol != 0x0057 ) - { - // Neither IPv4 nor IPv6. - sessions->Weird("non_ip_packet_in_pppoe_encapsulation", &hdr, data); - data = 0; - return; - } - break; - } - - break; - } - - case DLT_PPP_SERIAL: - { - // Get PPP protocol. - protocol = (data[2] << 8) + data[3]; - - if ( protocol == 0x0281 ) - { - // MPLS Unicast. Remove the data link layer and - // denote a header size of zero before the IP header. - have_mpls = true; - data += get_link_header_size(datalink); - pkt_hdr_size = 0; - } - - else if ( protocol != 0x0021 && protocol != 0x0057 ) - { - // Neither IPv4 nor IPv6. - sessions->Weird("non_ip_packet_in_ppp_encapsulation", &hdr, data); - data = 0; - return; - } - break; - } - } - - if ( have_mpls ) - { - // Skip the MPLS label stack. - bool end_of_stack = false; - - while ( ! end_of_stack ) - { - end_of_stack = *(data + 2) & 0x01; - data += 4; - } - } - - if ( pseudo_realtime ) - { - current_pseudo = CheckPseudoTime(); - net_packet_dispatch(current_pseudo, &hdr, data, pkt_hdr_size, this); - if ( ! 
first_wallclock ) - first_wallclock = current_time(true); - } - - else - net_packet_dispatch(current_timestamp, &hdr, data, pkt_hdr_size, this); - - data = 0; - } - -bool PktSrc::GetCurrentPacket(const struct pcap_pkthdr** arg_hdr, - const u_char** arg_pkt) - { - if ( ! last_data ) - return false; - - *arg_hdr = &hdr; - *arg_pkt = last_data; - return true; - } - -int PktSrc::PrecompileFilter(int index, const char* filter) - { - // Compile filter. - BPF_Program* code = new BPF_Program(); - - if ( ! code->Compile(pd, filter, netmask, errbuf, sizeof(errbuf)) ) - { - delete code; - return 0; - } - - // Store it in hash. - HashKey* hash = new HashKey(HashKey(bro_int_t(index))); - BPF_Program* oldcode = filters.Lookup(hash); - if ( oldcode ) - delete oldcode; - - filters.Insert(hash, code); - delete hash; - - return 1; - } - -int PktSrc::SetFilter(int index) - { - // We don't want load-level filters for the secondary path. - if ( filter_type == TYPE_FILTER_SECONDARY && index > 0 ) - return 1; - - HashKey* hash = new HashKey(HashKey(bro_int_t(index))); - BPF_Program* code = filters.Lookup(hash); - delete hash; - - if ( ! code ) - { - safe_snprintf(errbuf, sizeof(errbuf), - "No precompiled pcap filter for index %d", - index); - return 0; - } - - if ( pcap_setfilter(pd, code->GetProgram()) < 0 ) - { - safe_snprintf(errbuf, sizeof(errbuf), - "pcap_setfilter(%d): %s", - index, pcap_geterr(pd)); - return 0; - } - -#ifndef HAVE_LINUX - // Linux doesn't clear counters when resetting filter. 
- stats.received = stats.dropped = stats.link = 0; -#endif - - return 1; - } - -void PktSrc::SetHdrSize() - { - int dl = pcap_datalink(pd); - hdr_size = get_link_header_size(dl); - - if ( hdr_size < 0 ) - { - safe_snprintf(errbuf, sizeof(errbuf), - "unknown data link type 0x%x", dl); - Close(); - } - - datalink = dl; - } - -void PktSrc::Close() - { - if ( pd ) - { - pcap_close(pd); - pd = 0; - closed = true; - } - } - -void PktSrc::AddSecondaryTablePrograms() - { - BPF_Program* program; - - loop_over_list(secondary_path->EventTable(), i) - { - SecondaryEvent* se = secondary_path->EventTable()[i]; - program = new BPF_Program(); - - if ( ! program->Compile(snaplen, datalink, se->Filter(), - netmask, errbuf, sizeof(errbuf)) ) - { - delete program; - Close(); - return; - } - - SecondaryProgram* sp = new SecondaryProgram(program, se); - program_list.append(sp); - } - } - -void PktSrc::Statistics(Stats* s) - { - if ( reading_traces ) - s->received = s->dropped = s->link = 0; - - else - { - struct pcap_stat pstat; - if ( pcap_stats(pd, &pstat) < 0 ) - { - reporter->Error("problem getting packet filter statistics: %s", - ErrorMsg()); - s->received = s->dropped = s->link = 0; - } - - else - { - s->dropped = pstat.ps_drop; - s->link = pstat.ps_recv; - } - } - - s->received = stats.received; - - if ( pseudo_realtime ) - s->dropped = 0; - - stats.dropped = s->dropped; - } - -PktInterfaceSrc::PktInterfaceSrc(const char* arg_interface, const char* filter, - PktSrc_Filter_Type ft) -: PktSrc() - { - char tmp_errbuf[PCAP_ERRBUF_SIZE]; - filter_type = ft; - - // Determine interface if not specified. - if ( ! arg_interface && ! (arg_interface = pcap_lookupdev(tmp_errbuf)) ) - { - safe_snprintf(errbuf, sizeof(errbuf), - "pcap_lookupdev: %s", tmp_errbuf); - return; - } - - interface = copy_string(arg_interface); - - // Determine network and netmask. 
- uint32 net; - if ( pcap_lookupnet(interface, &net, &netmask, tmp_errbuf) < 0 ) - { - // ### The lookup can fail if no address is assigned to - // the interface; and libpcap doesn't have any useful notion - // of error codes, just error strings - how bogus - so we - // just kludge around the error :-(. - // sprintf(errbuf, "pcap_lookupnet %s", tmp_errbuf); - // return; - net = 0; - netmask = 0xffffff00; - } - - // We use the smallest time-out possible to return almost immediately if - // no packets are available. (We can't use set_nonblocking() as it's - // broken on FreeBSD: even when select() indicates that we can read - // something, we may get nothing if the store buffer hasn't filled up - // yet.) - pd = pcap_open_live(interface, snaplen, 1, 1, tmp_errbuf); - - if ( ! pd ) - { - safe_snprintf(errbuf, sizeof(errbuf), - "pcap_open_live: %s", tmp_errbuf); - closed = true; - return; - } - - // ### This needs autoconf'ing. -#ifdef HAVE_PCAP_INT_H - reporter->Info("pcap bufsize = %d\n", ((struct pcap *) pd)->bufsize); -#endif - -#ifdef HAVE_LINUX - if ( pcap_setnonblock(pd, 1, tmp_errbuf) < 0 ) - { - safe_snprintf(errbuf, sizeof(errbuf), - "pcap_setnonblock: %s", tmp_errbuf); - pcap_close(pd); - closed = true; - return; - } -#endif - selectable_fd = pcap_fileno(pd); - - if ( PrecompileFilter(0, filter) && SetFilter(0) ) - { - SetHdrSize(); - - if ( closed ) - // Couldn't get header size. - return; - - reporter->Info("listening on %s, capture length %d bytes\n", interface, snaplen); - } - else - closed = true; - } - - -PktFileSrc::PktFileSrc(const char* arg_readfile, const char* filter, - PktSrc_Filter_Type ft) -: PktSrc() - { - readfile = copy_string(arg_readfile); - - filter_type = ft; - - pd = pcap_open_offline((char*) readfile, errbuf); - - if ( pd && PrecompileFilter(0, filter) && SetFilter(0) ) - { - SetHdrSize(); - - if ( closed ) - // Unknown link layer type. 
- return; - - // We don't put file sources into non-blocking mode as - // otherwise we would not be able to identify the EOF. - - selectable_fd = fileno(pcap_file(pd)); - - if ( selectable_fd < 0 ) - reporter->InternalError("OS does not support selectable pcap fd"); - } - else - closed = true; - } - - -SecondaryPath::SecondaryPath() - { - filter = 0; - - // Glue together the secondary filter, if exists. - Val* secondary_fv = internal_val("secondary_filters"); - if ( secondary_fv->AsTableVal()->Size() == 0 ) - return; - - int did_first = 0; - const TableEntryValPDict* v = secondary_fv->AsTable(); - IterCookie* c = v->InitForIteration(); - TableEntryVal* tv; - HashKey* h; - - while ( (tv = v->NextEntry(h, c)) ) - { - // Get the index values. - ListVal* index = - secondary_fv->AsTableVal()->RecoverIndex(h); - - const char* str = - index->Index(0)->Ref()->AsString()->CheckString(); - - if ( ++did_first == 1 ) - { - filter = copy_string(str); - } - else - { - if ( strlen(filter) > 0 ) - { - char* tmp_f = new char[strlen(str) + strlen(filter) + 32]; - if ( strlen(str) == 0 ) - sprintf(tmp_f, "%s", filter); - else - sprintf(tmp_f, "(%s) or (%s)", filter, str); - delete [] filter; - filter = tmp_f; - } - } - - // Build secondary_path event table item and link it. - SecondaryEvent* se = - new SecondaryEvent(index->Index(0)->Ref()->AsString()->CheckString(), - tv->Value()->AsFunc() ); - - event_list.append(se); - - delete h; - Unref(index); - } - } - -SecondaryPath::~SecondaryPath() - { - loop_over_list(event_list, i) - delete event_list[i]; - - delete [] filter; - } - - -SecondaryProgram::~SecondaryProgram() - { - delete program; - } - -PktDumper::PktDumper(const char* arg_filename, bool arg_append) - { - filename[0] = '\0'; - is_error = false; - append = arg_append; - dumper = 0; - open_time = 0.0; - - // We need a pcap_t with a reasonable link-layer type. We try to get it - // from the packet sources. If not available, we fall back to Ethernet. 
- // FIXME: Perhaps we should make this configurable? - int linktype = -1; - - if ( pkt_srcs.length() ) - linktype = pkt_srcs[0]->LinkType(); - - if ( linktype < 0 ) - linktype = DLT_EN10MB; - - pd = pcap_open_dead(linktype, snaplen); - if ( ! pd ) - { - Error("error for pcap_open_dead"); - return; - } - - if ( arg_filename ) - Open(arg_filename); - } - -bool PktDumper::Open(const char* arg_filename) - { - if ( ! arg_filename && ! *filename ) - { - Error("no filename given"); - return false; - } - - if ( arg_filename ) - { - if ( dumper && streq(arg_filename, filename) ) - // Already open. - return true; - - safe_strncpy(filename, arg_filename, FNBUF_LEN); - } - - if ( dumper ) - Close(); - - struct stat s; - int exists = 0; - - if ( append ) - { - // See if output file already exists (and is non-empty). - exists = stat(filename, &s); ; - - if ( exists < 0 && errno != ENOENT ) - { - Error(fmt("can't stat file %s: %s", filename, strerror(errno))); - return false; - } - } - - if ( ! append || exists < 0 || s.st_size == 0 ) - { - // Open new file. - dumper = pcap_dump_open(pd, filename); - if ( ! dumper ) - { - Error(pcap_geterr(pd)); - return false; - } - } - - else - { - // Old file and we need to append, which, unfortunately, - // is not supported by libpcap. So, we have to hack a - // little bit, knowing that pcap_dumpter_t is, in fact, - // a FILE ... :-( - dumper = (pcap_dumper_t*) fopen(filename, "a"); - if ( ! dumper ) - { - Error(fmt("can't open dump %s: %s", filename, strerror(errno))); - return false; - } - } - - open_time = network_time; - is_error = false; - return true; - } - -bool PktDumper::Close() - { - if ( dumper ) - { - pcap_dump_close(dumper); - dumper = 0; - is_error = false; - } - - return true; - } - -bool PktDumper::Dump(const struct pcap_pkthdr* hdr, const u_char* pkt) - { - if ( ! dumper ) - return false; - - if ( ! 
open_time ) - open_time = network_time; - - pcap_dump((u_char*) dumper, hdr, pkt); - - return true; - } - -void PktDumper::Error(const char* errstr) - { - safe_strncpy(errbuf, errstr, sizeof(errbuf)); - is_error = true; - } - -int get_link_header_size(int dl) - { - switch ( dl ) { - case DLT_NULL: - return 4; - - case DLT_EN10MB: - return 14; - - case DLT_FDDI: - return 13 + 8; // fddi_header + LLC - -#ifdef DLT_LINUX_SLL - case DLT_LINUX_SLL: - return 16; -#endif - - case DLT_PPP_SERIAL: // PPP_SERIAL - return 4; - - case DLT_RAW: - return 0; - } - - return -1; - } diff --git a/src/PktSrc.h b/src/PktSrc.h deleted file mode 100644 index 70eef4dd00..0000000000 --- a/src/PktSrc.h +++ /dev/null @@ -1,258 +0,0 @@ -// See the file "COPYING" in the main distribution directory for copyright. - -#ifndef pktsrc_h -#define pktsrc_h - -#include "Dict.h" -#include "Expr.h" -#include "BPF_Program.h" -#include "IOSource.h" -#include "RemoteSerializer.h" - -#define BRO_PCAP_ERRBUF_SIZE PCAP_ERRBUF_SIZE + 256 - -extern "C" { -#include -} - -declare(PDict,BPF_Program); - -// Whether a PktSrc object is used by the normal filter structure or the -// secondary-path structure. -typedef enum { - TYPE_FILTER_NORMAL, // the normal filter - TYPE_FILTER_SECONDARY, // the secondary-path filter -} PktSrc_Filter_Type; - - -// {filter,event} tuples conforming the secondary path. 
-class SecondaryEvent { -public: - SecondaryEvent(const char* arg_filter, Func* arg_event) - { - filter = arg_filter; - event = arg_event; - } - - const char* Filter() { return filter; } - Func* Event() { return event; } - -private: - const char* filter; - Func* event; -}; - -declare(PList,SecondaryEvent); -typedef PList(SecondaryEvent) secondary_event_list; - - - -class SecondaryPath { -public: - SecondaryPath(); - ~SecondaryPath(); - - secondary_event_list& EventTable() { return event_list; } - const char* Filter() { return filter; } - -private: - secondary_event_list event_list; - // OR'ed union of all SecondaryEvent filters - char* filter; -}; - -// Main secondary-path object. -extern SecondaryPath* secondary_path; - - -// {program, {filter,event}} tuple table. -class SecondaryProgram { -public: - SecondaryProgram(BPF_Program* arg_program, SecondaryEvent* arg_event) - { - program = arg_program; - event = arg_event; - } - - ~SecondaryProgram(); - - BPF_Program* Program() { return program; } - SecondaryEvent* Event() { return event; } - -private: - // Associated program. - BPF_Program *program; - - // Event that is run in case the program is matched. - SecondaryEvent* event; -}; - -declare(PList,SecondaryProgram); -typedef PList(SecondaryProgram) secondary_program_list; - - - -class PktSrc : public IOSource { -public: - ~PktSrc(); - - // IOSource interface - bool IsReady(); - void GetFds(int* read, int* write, int* except); - double NextTimestamp(double* local_network_time); - void Process(); - const char* Tag() { return "PktSrc"; } - - const char* ErrorMsg() const { return errbuf; } - void ClearErrorMsg() { *errbuf ='\0'; } - - // Returns the packet last processed; false if there is no - // current packet available. 
- bool GetCurrentPacket(const pcap_pkthdr** hdr, const u_char** pkt); - - int HdrSize() const { return hdr_size; } - int DataLink() const { return datalink; } - - void ConsumePacket() { data = 0; } - - int IsLive() const { return interface != 0; } - - pcap_t* PcapHandle() const { return pd; } - int LinkType() const { return pcap_datalink(pd); } - - const char* ReadFile() const { return readfile; } - const char* Interface() const { return interface; } - PktSrc_Filter_Type FilterType() const { return filter_type; } - void AddSecondaryTablePrograms(); - const secondary_program_list& ProgramTable() const - { return program_list; } - - // Signal packet source that processing was suspended and is now going - // to be continued. - void ContinueAfterSuspend(); - - // Only valid in pseudo-realtime mode. - double CurrentPacketTimestamp() { return current_pseudo; } - double CurrentPacketWallClock(); - - struct Stats { - unsigned int received; // pkts received (w/o drops) - unsigned int dropped; // pkts dropped - unsigned int link; // total packets on link - // (not always not available) - }; - - virtual void Statistics(Stats* stats); - - // Precompiles a filter and associates the given index with it. - // Returns true on success, 0 if a problem occurred. - virtual int PrecompileFilter(int index, const char* filter); - - // Activates the filter with the given index. - // Returns true on success, 0 if a problem occurred. - virtual int SetFilter(int index); - -protected: - PktSrc(); - - static const int PCAP_TIMEOUT = 20; - - void SetHdrSize(); - - virtual void Close(); - - // Returns 1 on success, 0 on time-out/gone dry. - virtual int ExtractNextPacket(); - - // Checks if the current packet has a pseudo-time <= current_time. - // If yes, returns pseudo-time, otherwise 0. - double CheckPseudoTime(); - - double current_timestamp; - double next_timestamp; - - // Only set in pseudo-realtime mode. 
- double first_timestamp; - double first_wallclock; - double current_wallclock; - double current_pseudo; - - struct pcap_pkthdr hdr; - const u_char* data; // contents of current packet - const u_char* last_data; // same, but unaffected by consuming - int hdr_size; - int datalink; - double next_sync_point; // For trace synchronziation in pseudo-realtime - - char* interface; // nil if not reading from an interface - char* readfile; // nil if not reading from a file - - pcap_t* pd; - int selectable_fd; - uint32 netmask; - char errbuf[BRO_PCAP_ERRBUF_SIZE]; - - Stats stats; - - PDict(BPF_Program) filters; // precompiled filters - - PktSrc_Filter_Type filter_type; // normal path or secondary path - secondary_program_list program_list; -}; - -class PktInterfaceSrc : public PktSrc { -public: - PktInterfaceSrc(const char* interface, const char* filter, - PktSrc_Filter_Type ft=TYPE_FILTER_NORMAL); -}; - -class PktFileSrc : public PktSrc { -public: - PktFileSrc(const char* readfile, const char* filter, - PktSrc_Filter_Type ft=TYPE_FILTER_NORMAL); -}; - - -extern int get_link_header_size(int dl); - -class PktDumper { -public: - PktDumper(const char* file = 0, bool append = false); - ~PktDumper() { Close(); } - - bool Open(const char* file = 0); - bool Close(); - bool Dump(const struct pcap_pkthdr* hdr, const u_char* pkt); - - pcap_dumper_t* PcapDumper() { return dumper; } - - const char* FileName() const { return filename; } - bool IsError() const { return is_error; } - const char* ErrorMsg() const { return errbuf; } - - // This heuristic will horribly fail if we're using packets - // with different link layers. (If we can't derive a reasonable value - // from the packet sources, our fall-back is Ethernet.) - int HdrSize() const - { return get_link_header_size(pcap_datalink(pd)); } - - // Network time when dump file was opened. 
- double OpenTime() const { return open_time; } - -private: - void InitPd(); - void Error(const char* str); - - static const int FNBUF_LEN = 1024; - char filename[FNBUF_LEN]; - - bool append; - pcap_dumper_t* dumper; - pcap_t* pd; - double open_time; - - bool is_error; - char errbuf[BRO_PCAP_ERRBUF_SIZE]; -}; - -#endif diff --git a/src/RemoteSerializer.cc b/src/RemoteSerializer.cc index 6cda46cd6c..b475c4a8cc 100644 --- a/src/RemoteSerializer.cc +++ b/src/RemoteSerializer.cc @@ -188,10 +188,11 @@ #include "File.h" #include "Conn.h" #include "Reporter.h" -#include "threading/SerialTypes.h" -#include "logging/Manager.h" #include "IPAddr.h" #include "bro_inet_ntop.h" +#include "iosource/Manager.h" +#include "logging/Manager.h" +#include "logging/logging.bif.h" extern "C" { #include "setsignal.h" @@ -284,10 +285,10 @@ struct ping_args { \ if ( ! c ) \ { \ - idle = io->IsIdle();\ + SetIdle(io->IsIdle());\ return true; \ } \ - idle = false; \ + SetIdle(false); \ } static const char* msgToStr(int msg) @@ -533,7 +534,6 @@ RemoteSerializer::RemoteSerializer() current_sync_point = 0; syncing_times = false; io = 0; - closed = false; terminating = false; in_sync = 0; last_flush = 0; @@ -558,7 +558,7 @@ RemoteSerializer::~RemoteSerializer() delete io; } -void RemoteSerializer::Init() +void RemoteSerializer::Enable() { if ( initialized ) return; @@ -571,7 +571,7 @@ void RemoteSerializer::Init() Fork(); - io_sources.Register(this); + iosource_mgr->Register(this); Log(LogInfo, fmt("communication started, parent pid is %d, child pid is %d", getpid(), child_pid)); initialized = 1; @@ -1275,7 +1275,7 @@ bool RemoteSerializer::Listen(const IPAddr& ip, uint16 port, bool expect_ssl, return false; listening = true; - closed = false; + SetClosed(false); return true; } @@ -1344,7 +1344,7 @@ bool RemoteSerializer::StopListening() return false; listening = false; - closed = ! IsActive(); + SetClosed(! 
IsActive()); return true; } @@ -1367,12 +1367,14 @@ void RemoteSerializer::Unregister(ID* id) } } -void RemoteSerializer::GetFds(int* read, int* write, int* except) +void RemoteSerializer::GetFds(iosource::FD_Set* read, iosource::FD_Set* write, + iosource::FD_Set* except) { - *read = io->Fd(); + read->Insert(io->Fd()); + read->Insert(io->ExtraReadFDs()); if ( io->CanWrite() ) - *write = io->Fd(); + write->Insert(io->Fd()); } double RemoteSerializer::NextTimestamp(double* local_network_time) @@ -1382,7 +1384,7 @@ double RemoteSerializer::NextTimestamp(double* local_network_time) if ( received_logs > 0 ) { // If we processed logs last time, assume there's more. - idle = false; + SetIdle(false); received_logs = 0; return timer_mgr->Time(); } @@ -1397,7 +1399,7 @@ double RemoteSerializer::NextTimestamp(double* local_network_time) pt = timer_mgr->Time(); if ( packets.length() ) - idle = false; + SetIdle(false); if ( et >= 0 && (et < pt || pt < 0) ) return et; @@ -1476,7 +1478,7 @@ void RemoteSerializer::Process() } if ( packets.length() ) - idle = false; + SetIdle(false); } void RemoteSerializer::Finish() @@ -1508,7 +1510,7 @@ bool RemoteSerializer::Poll(bool may_block) } io->Flush(); - idle = false; + SetIdle(false); switch ( msgstate ) { case TYPE: @@ -1690,7 +1692,7 @@ bool RemoteSerializer::DoMessage() case MSG_TERMINATE: assert(terminating); - io_sources.Terminate(); + iosource_mgr->Terminate(); return true; case MSG_REMOTE_PRINT: @@ -1878,7 +1880,7 @@ void RemoteSerializer::RemovePeer(Peer* peer) delete peer->cache_out; delete peer; - closed = ! IsActive(); + SetClosed(! 
IsActive()); if ( in_sync == peer ) in_sync = 0; @@ -2723,8 +2725,8 @@ bool RemoteSerializer::ProcessLogCreateWriter() fmt.EndRead(); - id_val = new EnumVal(id, BifType::Enum::Log::ID); - writer_val = new EnumVal(writer, BifType::Enum::Log::Writer); + id_val = new EnumVal(id, internal_type("Log::ID")->AsEnumType()); + writer_val = new EnumVal(writer, internal_type("Log::Writer")->AsEnumType()); if ( ! log_mgr->CreateWriter(id_val, writer_val, info, num_fields, fields, true, false, true) ) @@ -2796,8 +2798,8 @@ bool RemoteSerializer::ProcessLogWrite() } } - id_val = new EnumVal(id, BifType::Enum::Log::ID); - writer_val = new EnumVal(writer, BifType::Enum::Log::Writer); + id_val = new EnumVal(id, internal_type("Log::ID")->AsEnumType()); + writer_val = new EnumVal(writer, internal_type("Log::Writer")->AsEnumType()); success = log_mgr->Write(id_val, writer_val, path, num_fields, vals); @@ -2840,7 +2842,7 @@ void RemoteSerializer::GotEvent(const char* name, double time, BufferedEvent* e = new BufferedEvent; // Our time, not the time when the event was generated. - e->time = pkt_srcs.length() ? + e->time = iosource_mgr->GetPktSrcs().size() ? time_t(network_time) : time_t(timer_mgr->Time()); e->src = current_peer->id; @@ -3085,7 +3087,7 @@ RecordVal* RemoteSerializer::GetPeerVal(PeerID id) void RemoteSerializer::ChildDied() { Log(LogError, "child died"); - closed = true; + SetClosed(true); child_pid = 0; // Shut down the main process as well. 
@@ -3184,7 +3186,7 @@ void RemoteSerializer::FatalError(const char* msg) Log(LogError, msg); reporter->Error("%s", msg); - closed = true; + SetClosed(true); if ( kill(child_pid, SIGQUIT) < 0 ) reporter->Warning("warning: cannot kill child pid %d, %s", child_pid, strerror(errno)); @@ -3355,6 +3357,15 @@ SocketComm::~SocketComm() static unsigned int first_rtime = 0; +static void fd_vector_set(const std::vector& fds, fd_set* set, int* max) + { + for ( size_t i = 0; i < fds.size(); ++i ) + { + FD_SET(fds[i], set); + *max = ::max(fds[i], *max); + } + } + void SocketComm::Run() { first_rtime = (unsigned int) current_time(true); @@ -3376,10 +3387,9 @@ void SocketComm::Run() FD_ZERO(&fd_write); FD_ZERO(&fd_except); - int max_fd = 0; - + int max_fd = io->Fd(); FD_SET(io->Fd(), &fd_read); - max_fd = io->Fd(); + max_fd = std::max(max_fd, io->ExtraReadFDs().Set(&fd_read)); loop_over_list(peers, i) { @@ -3388,6 +3398,8 @@ void SocketComm::Run() FD_SET(peers[i]->io->Fd(), &fd_read); if ( peers[i]->io->Fd() > max_fd ) max_fd = peers[i]->io->Fd(); + max_fd = std::max(max_fd, + peers[i]->io->ExtraReadFDs().Set(&fd_read)); } else { @@ -3438,38 +3450,17 @@ void SocketComm::Run() if ( ! io->IsFillingUp() && shutting_conns_down ) shutting_conns_down = false; - // We cannot rely solely on select() as the there may - // be some data left in our input/output queues. So, we use - // a small timeout for select and check for data - // manually afterwards. - static long selects = 0; static long canwrites = 0; - static long timeouts = 0; ++selects; if ( io->CanWrite() ) ++canwrites; - // FIXME: Fine-tune this (timeouts, flush, etc.) - struct timeval small_timeout; - small_timeout.tv_sec = 0; - small_timeout.tv_usec = - io->CanWrite() || io->CanRead() ? 1 : 10; - -#if 0 - if ( ! 
io->CanWrite() ) - usleep(10); -#endif - - int a = select(max_fd + 1, &fd_read, &fd_write, &fd_except, - &small_timeout); - - if ( a == 0 ) - ++timeouts; + int a = select(max_fd + 1, &fd_read, &fd_write, &fd_except, 0); if ( selects % 100000 == 0 ) - Log(fmt("selects=%ld canwrites=%ld timeouts=%ld", selects, canwrites, timeouts)); + Log(fmt("selects=%ld canwrites=%ld", selects, canwrites)); if ( a < 0 ) // Ignore errors for now. @@ -4211,6 +4202,7 @@ bool SocketComm::Listen() safe_close(fd); CloseListenFDs(); listen_next_try = time(0) + bind_retry_interval; + freeaddrinfo(res0); return false; } diff --git a/src/RemoteSerializer.h b/src/RemoteSerializer.h index 9dbfbd9dae..2af7610a7c 100644 --- a/src/RemoteSerializer.h +++ b/src/RemoteSerializer.h @@ -6,7 +6,7 @@ #include "Dict.h" #include "List.h" #include "Serializer.h" -#include "IOSource.h" +#include "iosource/IOSource.h" #include "Stats.h" #include "File.h" #include "logging/WriterBackend.h" @@ -22,13 +22,13 @@ namespace threading { } // This class handles the communication done in Bro's main loop. -class RemoteSerializer : public Serializer, public IOSource { +class RemoteSerializer : public Serializer, public iosource::IOSource { public: RemoteSerializer(); virtual ~RemoteSerializer(); // Initialize the remote serializer (calling this will fork). - void Init(); + void Enable(); // FIXME: Use SourceID directly (or rename everything to Peer*). 
typedef SourceID PeerID; @@ -140,7 +140,8 @@ public: void Finish(); // Overidden from IOSource: - virtual void GetFds(int* read, int* write, int* except); + virtual void GetFds(iosource::FD_Set* read, iosource::FD_Set* write, + iosource::FD_Set* except); virtual double NextTimestamp(double* local_network_time); virtual void Process(); virtual TimerMgr::Tag* GetCurrentTag(); diff --git a/src/Serializer.cc b/src/Serializer.cc index 36b1c74000..7306b0ded0 100644 --- a/src/Serializer.cc +++ b/src/Serializer.cc @@ -19,6 +19,7 @@ #include "Conn.h" #include "Timer.h" #include "RemoteSerializer.h" +#include "iosource/Manager.h" Serializer::Serializer(SerializationFormat* arg_format) { @@ -1045,7 +1046,7 @@ EventPlayer::EventPlayer(const char* file) Error(fmt("event replayer: cannot open %s", file)); if ( ReadHeader() ) - io_sources.Register(this); + iosource_mgr->Register(this); } EventPlayer::~EventPlayer() @@ -1067,9 +1068,10 @@ void EventPlayer::GotFunctionCall(const char* name, double time, // We don't replay function calls. } -void EventPlayer::GetFds(int* read, int* write, int* except) +void EventPlayer::GetFds(iosource::FD_Set* read, iosource::FD_Set* write, + iosource::FD_Set* except) { - *read = fd; + read->Insert(fd); } double EventPlayer::NextTimestamp(double* local_network_time) @@ -1085,7 +1087,7 @@ double EventPlayer::NextTimestamp(double* local_network_time) { UnserialInfo info(this); Unserialize(&info); - closed = io->Eof(); + SetClosed(io->Eof()); } if ( ! 
ne_time ) @@ -1142,7 +1144,7 @@ bool Packet::Serialize(SerialInfo* info) const static BroFile* profiling_output = 0; #ifdef DEBUG -static PktDumper* dump = 0; +static iosource::PktDumper* dump = 0; #endif Packet* Packet::Unserialize(UnserialInfo* info) @@ -1188,7 +1190,7 @@ Packet* Packet::Unserialize(UnserialInfo* info) p->hdr = hdr; p->pkt = (u_char*) pkt; p->tag = tag; - p->hdr_size = get_link_header_size(p->link_type); + p->hdr_size = iosource::PktSrc::GetLinkHeaderSize(p->link_type); delete [] tag; @@ -1213,9 +1215,15 @@ Packet* Packet::Unserialize(UnserialInfo* info) if ( debug_logger.IsEnabled(DBG_TM) ) { if ( ! dump ) - dump = new PktDumper("tm.pcap"); + dump = iosource_mgr->OpenPktDumper("tm.pcap", true); - dump->Dump(p->hdr, p->pkt); + if ( dump ) + { + iosource::PktDumper::Packet dp; + dp.hdr = p->hdr; + dp.data = p->pkt; + dump->Dump(&dp); + } } #endif diff --git a/src/Serializer.h b/src/Serializer.h index 543797a7af..558dce2086 100644 --- a/src/Serializer.h +++ b/src/Serializer.h @@ -15,7 +15,7 @@ #include "SerialInfo.h" #include "IP.h" #include "Timer.h" -#include "IOSource.h" +#include "iosource/IOSource.h" #include "Reporter.h" class SerializationCache; @@ -350,12 +350,13 @@ public: }; // Plays a file of events back. 
-class EventPlayer : public FileSerializer, public IOSource { +class EventPlayer : public FileSerializer, public iosource::IOSource { public: EventPlayer(const char* file); virtual ~EventPlayer(); - virtual void GetFds(int* read, int* write, int* except); + virtual void GetFds(iosource::FD_Set* read, iosource::FD_Set* write, + iosource::FD_Set* except); virtual double NextTimestamp(double* local_network_time); virtual void Process(); virtual const char* Tag() { return "EventPlayer"; } diff --git a/src/Sessions.cc b/src/Sessions.cc index ec275a1689..43e55dd95a 100644 --- a/src/Sessions.cc +++ b/src/Sessions.cc @@ -167,7 +167,7 @@ void NetSessions::Done() void NetSessions::DispatchPacket(double t, const struct pcap_pkthdr* hdr, const u_char* pkt, int hdr_size, - PktSrc* src_ps) + iosource::PktSrc* src_ps) { const struct ip* ip_hdr = 0; const u_char* ip_data = 0; @@ -184,10 +184,7 @@ void NetSessions::DispatchPacket(double t, const struct pcap_pkthdr* hdr, // Blanket encapsulation hdr_size += encap_hdr_size; - if ( src_ps->FilterType() == TYPE_FILTER_NORMAL ) - NextPacket(t, hdr, pkt, hdr_size); - else - NextPacketSecondary(t, hdr, pkt, hdr_size, src_ps); + NextPacket(t, hdr, pkt, hdr_size); } void NetSessions::NextPacket(double t, const struct pcap_pkthdr* hdr, @@ -262,53 +259,6 @@ void NetSessions::NextPacket(double t, const struct pcap_pkthdr* hdr, DumpPacket(hdr, pkt); } -void NetSessions::NextPacketSecondary(double /* t */, const struct pcap_pkthdr* hdr, - const u_char* const pkt, int hdr_size, - const PktSrc* src_ps) - { - SegmentProfiler(segment_logger, "processing-secondary-packet"); - - ++num_packets_processed; - - uint32 caplen = hdr->caplen - hdr_size; - if ( caplen < sizeof(struct ip) ) - { - Weird("truncated_IP", hdr, pkt); - return; - } - - const struct ip* ip = (const struct ip*) (pkt + hdr_size); - if ( ip->ip_v == 4 ) - { - const secondary_program_list& spt = src_ps->ProgramTable(); - - loop_over_list(spt, i) - { - SecondaryProgram* sp = spt[i]; - if 
( ! net_packet_match(sp->Program(), pkt, - hdr->len, hdr->caplen) ) - continue; - - val_list* args = new val_list; - StringVal* cmd_val = - new StringVal(sp->Event()->Filter()); - args->append(cmd_val); - IP_Hdr ip_hdr(ip, false); - args->append(ip_hdr.BuildPktHdrVal()); - // ### Need to queue event here. - try - { - sp->Event()->Event()->Call(args); - } - - catch ( InterpreterException& e ) - { /* Already reported. */ } - - delete args; - } - } - } - int NetSessions::CheckConnectionTag(Connection* conn) { if ( current_iosrc->GetCurrentTag() ) @@ -1440,14 +1390,24 @@ void NetSessions::DumpPacket(const struct pcap_pkthdr* hdr, return; if ( len == 0 ) - pkt_dumper->Dump(hdr, pkt); + { + iosource::PktDumper::Packet p; + p.hdr = hdr; + p.data = pkt; + pkt_dumper->Dump(&p); + } + else { struct pcap_pkthdr h = *hdr; h.caplen = len; if ( h.caplen > hdr->caplen ) reporter->InternalError("bad modified caplen"); - pkt_dumper->Dump(&h, pkt); + + iosource::PktDumper::Packet p; + p.hdr = &h; + p.data = pkt; + pkt_dumper->Dump(&p); } } diff --git a/src/Sessions.h b/src/Sessions.h index 06cdbca978..c46c092263 100644 --- a/src/Sessions.h +++ b/src/Sessions.h @@ -69,11 +69,11 @@ public: ~NetSessions(); // Main entry point for packet processing. Dispatches the packet - // either through NextPacket() or NextPacketSecondary(), optionally - // employing the packet sorter first. + // either through NextPacket(), optionally employing the packet + // sorter first. void DispatchPacket(double t, const struct pcap_pkthdr* hdr, const u_char* const pkt, int hdr_size, - PktSrc* src_ps); + iosource::PktSrc* src_ps); void Done(); // call to drain events before destructing @@ -221,10 +221,6 @@ protected: void NextPacket(double t, const struct pcap_pkthdr* hdr, const u_char* const pkt, int hdr_size); - void NextPacketSecondary(double t, const struct pcap_pkthdr* hdr, - const u_char* const pkt, int hdr_size, - const PktSrc* src_ps); - // Record the given packet (if a dumper is active). 
If len=0 // then the whole packet is recorded, otherwise just the first // len bytes. diff --git a/src/Stmt.cc b/src/Stmt.cc index 3571cad197..cb716b3f15 100644 --- a/src/Stmt.cc +++ b/src/Stmt.cc @@ -660,8 +660,13 @@ void Case::Describe(ODesc* d) const TraversalCode Case::Traverse(TraversalCallback* cb) const { - TraversalCode tc = cases->Traverse(cb); - HANDLE_TC_STMT_PRE(tc); + TraversalCode tc; + + if ( cases ) + { + tc = cases->Traverse(cb); + HANDLE_TC_STMT_PRE(tc); + } tc = s->Traverse(cb); HANDLE_TC_STMT_PRE(tc); diff --git a/src/Type.cc b/src/Type.cc index 6a0aa35b1b..ead31f1b7d 100644 --- a/src/Type.cc +++ b/src/Type.cc @@ -1381,6 +1381,11 @@ void OpaqueType::Describe(ODesc* d) const d->Add(name.c_str()); } +void OpaqueType::DescribeReST(ODesc* d, bool roles_only) const + { + d->Add(fmt(":bro:type:`%s` of %s", type_name(Tag()), name.c_str())); + } + IMPLEMENT_SERIAL(OpaqueType, SER_OPAQUE_TYPE); bool OpaqueType::DoSerialize(SerialInfo* info) const @@ -1476,10 +1481,19 @@ void EnumType::CheckAndAddName(const string& module_name, const char* name, } else { + // We allow double-definitions if matching exactly. This is so that + // we can define an enum both in a *.bif and *.bro for avoiding + // cyclic dependencies. 
+ if ( id->Name() != make_full_var_name(module_name.c_str(), name) + || (id->HasVal() && val != id->ID_Val()->AsEnum()) ) + { + Unref(id); + reporter->Error("identifier or enumerator value in enumerated type definition already exists"); + SetError(); + return; + } + Unref(id); - reporter->Error("identifier or enumerator value in enumerated type definition already exists"); - SetError(); - return; } AddNameInternal(module_name, name, val, is_export); diff --git a/src/Type.h b/src/Type.h index a4c9bda541..a9f1e42a6d 100644 --- a/src/Type.h +++ b/src/Type.h @@ -534,6 +534,7 @@ public: const string& Name() const { return name; } void Describe(ODesc* d) const; + void DescribeReST(ODesc* d, bool roles_only = false) const; protected: OpaqueType() { } diff --git a/src/Val.cc b/src/Val.cc index 5f605a178e..7c83830bf9 100644 --- a/src/Val.cc +++ b/src/Val.cc @@ -465,10 +465,7 @@ void Val::Describe(ODesc* d) const d->SP(); } - if ( d->IsReadable() ) - ValDescribe(d); - else - Val::ValDescribe(d); + ValDescribe(d); } void Val::DescribeReST(ODesc* d) const diff --git a/src/Var.cc b/src/Var.cc index aa45faaf41..0a196b9cac 100644 --- a/src/Var.cc +++ b/src/Var.cc @@ -9,6 +9,7 @@ #include "Serializer.h" #include "RemoteSerializer.h" #include "EventRegistry.h" +#include "Traverse.h" static Val* init_val(Expr* init, const BroType* t, Val* aggr) { @@ -392,6 +393,34 @@ void begin_func(ID* id, const char* module_name, function_flavor flavor, } } +class OuterIDBindingFinder : public TraversalCallback { +public: + OuterIDBindingFinder(Scope* s) + : scope(s) { } + + virtual TraversalCode PreExpr(const Expr*); + + Scope* scope; + vector outer_id_references; +}; + +TraversalCode OuterIDBindingFinder::PreExpr(const Expr* expr) + { + if ( expr->Tag() != EXPR_NAME ) + return TC_CONTINUE; + + const NameExpr* e = static_cast(expr); + + if ( e->Id()->IsGlobal() ) + return TC_CONTINUE; + + if ( scope->GetIDs()->Lookup(e->Id()->Name()) ) + return TC_CONTINUE; + + outer_id_references.push_back(e); + 
return TC_CONTINUE; + } + void end_func(Stmt* body, attr_list* attrs) { int frame_size = current_scope()->Length(); @@ -429,6 +458,16 @@ void end_func(Stmt* body, attr_list* attrs) } } + if ( streq(id->Name(), "anonymous-function") ) + { + OuterIDBindingFinder cb(scope); + body->Traverse(&cb); + + for ( size_t i = 0; i < cb.outer_id_references.size(); ++i ) + cb.outer_id_references[i]->Error( + "referencing outer function IDs not supported"); + } + if ( id->HasVal() ) id->ID_Val()->AsFunc()->AddBody(body, inits, frame_size, priority); else diff --git a/src/analyzer/Analyzer.cc b/src/analyzer/Analyzer.cc index bd85f8263a..fb5602f96e 100644 --- a/src/analyzer/Analyzer.cc +++ b/src/analyzer/Analyzer.cc @@ -4,6 +4,7 @@ #include "Analyzer.h" #include "Manager.h" +#include "binpac.h" #include "analyzer/protocol/pia/PIA.h" #include "../Event.h" diff --git a/src/analyzer/Manager.cc b/src/analyzer/Manager.cc index 6c495a6fd9..bc8fceaf39 100644 --- a/src/analyzer/Manager.cc +++ b/src/analyzer/Manager.cc @@ -60,7 +60,7 @@ bool Manager::ConnIndex::operator<(const ConnIndex& other) const } Manager::Manager() - : plugin::ComponentManager("Analyzer") + : plugin::ComponentManager("Analyzer", "Tag") { } diff --git a/src/analyzer/Manager.h b/src/analyzer/Manager.h index 151e8922ed..2388a36219 100644 --- a/src/analyzer/Manager.h +++ b/src/analyzer/Manager.h @@ -45,10 +45,6 @@ namespace analyzer { * sets up their initial analyzer tree, including adding the right \c PIA, * respecting well-known ports, and tracking any analyzers specifically * scheduled for individidual connections. - * - * Note that we keep the public interface of this class free of std::* - * classes. This allows to external analyzer code to potentially use a - * different C++ standard library. 
*/ class Manager : public plugin::ComponentManager { public: diff --git a/src/analyzer/protocol/dhcp/dhcp-analyzer.pac b/src/analyzer/protocol/dhcp/dhcp-analyzer.pac index 336c8dc760..a967940ca6 100644 --- a/src/analyzer/protocol/dhcp/dhcp-analyzer.pac +++ b/src/analyzer/protocol/dhcp/dhcp-analyzer.pac @@ -188,6 +188,9 @@ flow DHCP_Flow(is_orig: bool) { switch ( type ) { case DHCPOFFER: + if ( ! router_list ) + router_list = new TableVal(dhcp_router_list); + BifEvent::generate_dhcp_offer(connection()->bro_analyzer(), connection()->bro_analyzer()->Conn(), dhcp_msg_val_->Ref(), new AddrVal(subnet_mask), @@ -195,6 +198,9 @@ flow DHCP_Flow(is_orig: bool) { break; case DHCPACK: + if ( ! router_list ) + router_list = new TableVal(dhcp_router_list); + BifEvent::generate_dhcp_ack(connection()->bro_analyzer(), connection()->bro_analyzer()->Conn(), dhcp_msg_val_->Ref(), new AddrVal(subnet_mask), @@ -202,12 +208,14 @@ flow DHCP_Flow(is_orig: bool) { break; case DHCPNAK: + Unref(router_list); BifEvent::generate_dhcp_nak(connection()->bro_analyzer(), connection()->bro_analyzer()->Conn(), dhcp_msg_val_->Ref(), host_name); break; default: + Unref(router_list); Unref(host_name); break; } diff --git a/src/analyzer/protocol/dhcp/events.bif b/src/analyzer/protocol/dhcp/events.bif index 49a77d969e..bbd27c71f7 100644 --- a/src/analyzer/protocol/dhcp/events.bif +++ b/src/analyzer/protocol/dhcp/events.bif @@ -18,8 +18,8 @@ ## event dhcp_discover%(c: connection, msg: dhcp_msg, req_addr: addr, host_name: string%); -## Generated for DHCP messages of type *DHCPOFFER* (server to client in response to -## DHCPDISCOVER with offer of configuration parameters). +## Generated for DHCP messages of type *DHCPOFFER* (server to client in response +## to DHCPDISCOVER with offer of configuration parameters). ## ## c: The connection record describing the underlying UDP flow. 
## @@ -34,7 +34,7 @@ event dhcp_discover%(c: connection, msg: dhcp_msg, req_addr: addr, host_name: st ## serv_addr: The server address specified by the message. ## ## host_name: Optional host name value. May differ from the host name requested -## from the client. +## from the client. ## ## .. bro:see:: dhcp_discover dhcp_request dhcp_decline dhcp_ack dhcp_nak ## dhcp_release dhcp_inform @@ -103,7 +103,7 @@ event dhcp_decline%(c: connection, msg: dhcp_msg, host_name: string%); ## serv_addr: The server address specified by the message. ## ## host_name: Optional host name value. May differ from the host name requested -## from the client. +## from the client. ## ## .. bro:see:: dhcp_discover dhcp_offer dhcp_request dhcp_decline dhcp_nak ## dhcp_release dhcp_inform diff --git a/src/analyzer/protocol/dnp3/dnp3-analyzer.pac b/src/analyzer/protocol/dnp3/dnp3-analyzer.pac index 2ae783c82e..2065237f45 100644 --- a/src/analyzer/protocol/dnp3/dnp3-analyzer.pac +++ b/src/analyzer/protocol/dnp3/dnp3-analyzer.pac @@ -4,6 +4,24 @@ connection DNP3_Conn(bro_analyzer: BroAnalyzer) { downflow = DNP3_Flow(false); }; +%header{ + uint64 bytestring_to_time(const_bytestring time48, size_t length); + %} + +%code{ + uint64 bytestring_to_time(const_bytestring time48, size_t length) + { + /* In DNP3, a timestamp is represented by 6 bytes since epoch + in milliseconds. The 6 bytes are stored in big endian format. 
*/ + uint64 epochTime = 0; + + for ( unsigned int i = 0; i < length; i++ ) + epochTime = time48[length - i - 1] + epochTime * 256; + + return epochTime; + } + %} + flow DNP3_Flow(is_orig: bool) { flowunit = DNP3_PDU(is_orig) withcontext (connection, this); @@ -222,7 +240,7 @@ flow DNP3_Flow(is_orig: bool) { BifEvent::generate_dnp3_frozen_counter_32wFlagTime( connection()->bro_analyzer(), connection()->bro_analyzer()->Conn(), - is_orig(), flag, count_value, bytestring_to_val(time48)); + is_orig(), flag, count_value, bytestring_to_time(time48, sizeof(time48))); } return true; @@ -236,7 +254,7 @@ flow DNP3_Flow(is_orig: bool) { BifEvent::generate_dnp3_frozen_counter_16wFlagTime( connection()->bro_analyzer(), connection()->bro_analyzer()->Conn(), - is_orig(), flag, count_value, bytestring_to_val(time48)); + is_orig(), flag, count_value, bytestring_to_time(time48, sizeof(time48))); } return true; @@ -390,7 +408,7 @@ flow DNP3_Flow(is_orig: bool) { BifEvent::generate_dnp3_frozen_analog_input_32wTime( connection()->bro_analyzer(), connection()->bro_analyzer()->Conn(), - is_orig(), flag, frozen_value, bytestring_to_val(time48)); + is_orig(), flag, frozen_value, bytestring_to_time(time48, sizeof(time48))); } return true; @@ -404,7 +422,7 @@ flow DNP3_Flow(is_orig: bool) { BifEvent::generate_dnp3_frozen_analog_input_16wTime( connection()->bro_analyzer(), connection()->bro_analyzer()->Conn(), - is_orig(), flag, frozen_value, bytestring_to_val(time48)); + is_orig(), flag, frozen_value, bytestring_to_time(time48, sizeof(time48))); } return true; @@ -502,7 +520,7 @@ flow DNP3_Flow(is_orig: bool) { BifEvent::generate_dnp3_analog_input_event_32wTime( connection()->bro_analyzer(), connection()->bro_analyzer()->Conn(), - is_orig(), flag, value, bytestring_to_val(time48)); + is_orig(), flag, value, bytestring_to_time(time48, sizeof(time48))); } return true; @@ -516,7 +534,7 @@ flow DNP3_Flow(is_orig: bool) { BifEvent::generate_dnp3_analog_input_event_16wTime( 
connection()->bro_analyzer(), connection()->bro_analyzer()->Conn(), - is_orig(), flag, value, bytestring_to_val(time48)); + is_orig(), flag, value, bytestring_to_time(time48, sizeof(time48))); } return true; @@ -558,7 +576,7 @@ flow DNP3_Flow(is_orig: bool) { BifEvent::generate_dnp3_analog_input_event_SPwTime( connection()->bro_analyzer(), connection()->bro_analyzer()->Conn(), - is_orig(), flag, value, bytestring_to_val(time48)); + is_orig(), flag, value, bytestring_to_time(time48, sizeof(time48))); } return true; @@ -572,7 +590,7 @@ flow DNP3_Flow(is_orig: bool) { BifEvent::generate_dnp3_analog_input_event_DPwTime( connection()->bro_analyzer(), connection()->bro_analyzer()->Conn(), - is_orig(), flag, value_low, value_high, bytestring_to_val(time48)); + is_orig(), flag, value_low, value_high, bytestring_to_time(time48, sizeof(time48))); } return true; @@ -614,7 +632,7 @@ flow DNP3_Flow(is_orig: bool) { BifEvent::generate_dnp3_frozen_analog_input_event_32wTime( connection()->bro_analyzer(), connection()->bro_analyzer()->Conn(), - is_orig(), flag, frozen_value, bytestring_to_val(time48)); + is_orig(), flag, frozen_value, bytestring_to_time(time48, sizeof(time48))); } return true; @@ -628,7 +646,7 @@ flow DNP3_Flow(is_orig: bool) { BifEvent::generate_dnp3_frozen_analog_input_event_16wTime( connection()->bro_analyzer(), connection()->bro_analyzer()->Conn(), - is_orig(), flag, frozen_value, bytestring_to_val(time48)); + is_orig(), flag, frozen_value, bytestring_to_time(time48, sizeof(time48))); } return true; @@ -670,7 +688,7 @@ flow DNP3_Flow(is_orig: bool) { BifEvent::generate_dnp3_frozen_analog_input_event_SPwTime( connection()->bro_analyzer(), connection()->bro_analyzer()->Conn(), - is_orig(), flag, frozen_value, bytestring_to_val(time48)); + is_orig(), flag, frozen_value, bytestring_to_time(time48, sizeof(time48))); } return true; @@ -684,7 +702,7 @@ flow DNP3_Flow(is_orig: bool) { BifEvent::generate_dnp3_frozen_analog_input_event_DPwTime( 
connection()->bro_analyzer(), connection()->bro_analyzer()->Conn(), - is_orig(), flag, frozen_value_low, frozen_value_high, bytestring_to_val(time48)); + is_orig(), flag, frozen_value_low, frozen_value_high, bytestring_to_time(time48, sizeof(time48))); } return true; diff --git a/src/analyzer/protocol/dnp3/events.bif b/src/analyzer/protocol/dnp3/events.bif index 80f9504a9e..18fc42d6f7 100644 --- a/src/analyzer/protocol/dnp3/events.bif +++ b/src/analyzer/protocol/dnp3/events.bif @@ -130,11 +130,11 @@ event dnp3_frozen_counter_16wFlag%(c: connection, is_orig: bool, flag:count, cou ## Generated for DNP3 objects with the group number 21 and variation number 5 ## frozen counter 32 bit with flag and time -event dnp3_frozen_counter_32wFlagTime%(c: connection, is_orig: bool, flag:count, count_value: count, time48: string%); +event dnp3_frozen_counter_32wFlagTime%(c: connection, is_orig: bool, flag:count, count_value: count, time48: count%); ## Generated for DNP3 objects with the group number 21 and variation number 6 ## frozen counter 16 bit with flag and time -event dnp3_frozen_counter_16wFlagTime%(c: connection, is_orig: bool, flag:count, count_value: count, time48: string%); +event dnp3_frozen_counter_16wFlagTime%(c: connection, is_orig: bool, flag:count, count_value: count, time48: count%); ## Generated for DNP3 objects with the group number 21 and variation number 9 ## frozen counter 32 bit without flag @@ -178,11 +178,11 @@ event dnp3_frozen_analog_input_16wFlag%(c: connection, is_orig: bool, flag: coun ## Generated for DNP3 objects with the group number 31 and variation number 3 ## frozen analog input 32 bit with time-of-freeze -event dnp3_frozen_analog_input_32wTime%(c: connection, is_orig: bool, flag: count, frozen_value: count, time48: string%); +event dnp3_frozen_analog_input_32wTime%(c: connection, is_orig: bool, flag: count, frozen_value: count, time48: count%); ## Generated for DNP3 objects with the group number 31 and variation number 4 ## frozen analog 
input 16 bit with time-of-freeze -event dnp3_frozen_analog_input_16wTime%(c: connection, is_orig: bool, flag: count, frozen_value: count, time48: string%); +event dnp3_frozen_analog_input_16wTime%(c: connection, is_orig: bool, flag: count, frozen_value: count, time48: count%); ## Generated for DNP3 objects with the group number 31 and variation number 5 ## frozen analog input 32 bit without flag @@ -210,11 +210,11 @@ event dnp3_analog_input_event_16woTime%(c: connection, is_orig: bool, flag: coun ## Generated for DNP3 objects with the group number 32 and variation number 3 ## analog input event 32 bit with time -event dnp3_analog_input_event_32wTime%(c: connection, is_orig: bool, flag: count, value: count, time48: string%); +event dnp3_analog_input_event_32wTime%(c: connection, is_orig: bool, flag: count, value: count, time48: count%); ## Generated for DNP3 objects with the group number 32 and variation number 4 ## analog input event 16 bit with time -event dnp3_analog_input_event_16wTime%(c: connection, is_orig: bool, flag: count, value: count, time48: string%); +event dnp3_analog_input_event_16wTime%(c: connection, is_orig: bool, flag: count, value: count, time48: count%); ## Generated for DNP3 objects with the group number 32 and variation number 5 ## analog input event single-precision float point without time @@ -226,11 +226,11 @@ event dnp3_analog_input_event_DPwoTime%(c: connection, is_orig: bool, flag: coun ## Generated for DNP3 objects with the group number 32 and variation number 7 ## analog input event single-precision float point with time -event dnp3_analog_input_event_SPwTime%(c: connection, is_orig: bool, flag: count, value: count, time48: string%); +event dnp3_analog_input_event_SPwTime%(c: connection, is_orig: bool, flag: count, value: count, time48: count%); ## Generated for DNP3 objects with the group number 32 and variation number 8 ## analog input event double-precisiion float point with time -event dnp3_analog_input_event_DPwTime%(c: 
connection, is_orig: bool, flag: count, value_low: count, value_high: count, time48: string%); +event dnp3_analog_input_event_DPwTime%(c: connection, is_orig: bool, flag: count, value_low: count, value_high: count, time48: count%); ## Generated for DNP3 objects with the group number 33 and variation number 1 ## frozen analog input event 32 bit without time @@ -242,11 +242,11 @@ event dnp3_frozen_analog_input_event_16woTime%(c: connection, is_orig: bool, fla ## Generated for DNP3 objects with the group number 33 and variation number 3 ## frozen analog input event 32 bit with time -event dnp3_frozen_analog_input_event_32wTime%(c: connection, is_orig: bool, flag: count, frozen_value: count, time48: string%); +event dnp3_frozen_analog_input_event_32wTime%(c: connection, is_orig: bool, flag: count, frozen_value: count, time48: count%); ## Generated for DNP3 objects with the group number 33 and variation number 4 ## frozen analog input event 16 bit with time -event dnp3_frozen_analog_input_event_16wTime%(c: connection, is_orig: bool, flag: count, frozen_value: count, time48: string%); +event dnp3_frozen_analog_input_event_16wTime%(c: connection, is_orig: bool, flag: count, frozen_value: count, time48: count%); ## Generated for DNP3 objects with the group number 33 and variation number 5 ## frozen analog input event single-precision float point without time @@ -258,11 +258,11 @@ event dnp3_frozen_analog_input_event_DPwoTime%(c: connection, is_orig: bool, fla ## Generated for DNP3 objects with the group number 33 and variation number 7 ## frozen analog input event single-precision float point with time -event dnp3_frozen_analog_input_event_SPwTime%(c: connection, is_orig: bool, flag: count, frozen_value: count, time48: string%); +event dnp3_frozen_analog_input_event_SPwTime%(c: connection, is_orig: bool, flag: count, frozen_value: count, time48: count%); ## Generated for DNP3 objects with the group number 34 and variation number 8 ## frozen analog input event 
double-precision float point with time -event dnp3_frozen_analog_input_event_DPwTime%(c: connection, is_orig: bool, flag: count, frozen_value_low: count, frozen_value_high: count, time48: string%); +event dnp3_frozen_analog_input_event_DPwTime%(c: connection, is_orig: bool, flag: count, frozen_value_low: count, frozen_value_high: count, time48: count%); ## g70 event dnp3_file_transport%(c: connection, is_orig: bool, file_handle: count, block_num: count, file_data: string%); diff --git a/src/analyzer/protocol/dns/DNS.cc b/src/analyzer/protocol/dns/DNS.cc index 1c77fc6b51..e551351926 100644 --- a/src/analyzer/protocol/dns/DNS.cc +++ b/src/analyzer/protocol/dns/DNS.cc @@ -692,15 +692,23 @@ int DNS_Interpreter::ParseRR_EDNS(DNS_MsgInfo* msg, data += rdlength; len -= rdlength; } - else - { // no data, move on - data += rdlength; - len -= rdlength; - } return 1; } +void DNS_Interpreter::ExtractOctets(const u_char*& data, int& len, + BroString** p) + { + uint16 dlen = ExtractShort(data, len); + dlen = min(len, static_cast(dlen)); + + if ( p ) + *p = new BroString(data, dlen, 0); + + data += dlen; + len -= dlen; + } + int DNS_Interpreter::ParseRR_TSIG(DNS_MsgInfo* msg, const u_char*& data, int& len, int rdlength, const u_char* msg_start) @@ -718,24 +726,17 @@ int DNS_Interpreter::ParseRR_TSIG(DNS_MsgInfo* msg, uint32 sign_time_sec = ExtractLong(data, len); unsigned int sign_time_msec = ExtractShort(data, len); unsigned int fudge = ExtractShort(data, len); - - u_char request_MAC[16]; - memcpy(request_MAC, data, sizeof(request_MAC)); - - // Here we adjust the size of the requested MAC + u_int16_t - // for length. See RFC 2845, sec 2.3. 
- int n = sizeof(request_MAC) + sizeof(u_int16_t); - data += n; - len -= n; - + BroString* request_MAC; + ExtractOctets(data, len, &request_MAC); unsigned int orig_id = ExtractShort(data, len); unsigned int rr_error = ExtractShort(data, len); + ExtractOctets(data, len, 0); // Other Data msg->tsig = new TSIG_DATA; msg->tsig->alg_name = new BroString(alg_name, alg_name_end - alg_name, 1); - msg->tsig->sig = new BroString(request_MAC, sizeof(request_MAC), 1); + msg->tsig->sig = request_MAC; msg->tsig->time_s = sign_time_sec; msg->tsig->time_ms = sign_time_msec; msg->tsig->fudge = fudge; diff --git a/src/analyzer/protocol/dns/DNS.h b/src/analyzer/protocol/dns/DNS.h index 569a4ee53a..2d95d979b8 100644 --- a/src/analyzer/protocol/dns/DNS.h +++ b/src/analyzer/protocol/dns/DNS.h @@ -180,6 +180,7 @@ protected: uint16 ExtractShort(const u_char*& data, int& len); uint32 ExtractLong(const u_char*& data, int& len); + void ExtractOctets(const u_char*& data, int& len, BroString** p); int ParseRR_Name(DNS_MsgInfo* msg, const u_char*& data, int& len, int rdlength, diff --git a/src/analyzer/protocol/finger/Plugin.cc b/src/analyzer/protocol/finger/Plugin.cc index 4fbfed8e12..7dbaaf702d 100644 --- a/src/analyzer/protocol/finger/Plugin.cc +++ b/src/analyzer/protocol/finger/Plugin.cc @@ -1,6 +1,5 @@ // See the file in the main distribution directory for copyright. 
- #include "plugin/Plugin.h" #include "Finger.h" diff --git a/src/analyzer/protocol/http/HTTP.cc b/src/analyzer/protocol/http/HTTP.cc index 02b6947b9f..e63c8280c9 100644 --- a/src/analyzer/protocol/http/HTTP.cc +++ b/src/analyzer/protocol/http/HTTP.cc @@ -243,10 +243,13 @@ int HTTP_Entity::Undelivered(int64_t len) return 0; if ( is_partial_content ) + { precomputed_file_id = file_mgr->Gap(body_length, len, http_message->MyHTTP_Analyzer()->GetAnalyzerTag(), http_message->MyHTTP_Analyzer()->Conn(), http_message->IsOrig(), precomputed_file_id); + offset += len; + } else precomputed_file_id = file_mgr->Gap(body_length, len, http_message->MyHTTP_Analyzer()->GetAnalyzerTag(), @@ -463,6 +466,20 @@ void HTTP_Entity::SubmitAllHeaders() if ( DEBUG_http ) DEBUG_MSG("%.6f end of headers\n", network_time); + if ( Parent() && + Parent()->MIMEContentType() == mime::CONTENT_TYPE_MULTIPART ) + { + // Don't treat single \r or \n characters in the multipart body content + // as lines because the MIME_Entity code will implicitly add back a + // \r\n for each line it receives. We do this instead of setting + // plain delivery mode for the content line analyzer because + // the size of the content to deliver "plainly" may be unknown + // and just leaving it in that mode indefinitely screws up the + // detection of multipart boundaries. + http_message->content_line->SupressWeirds(true); + http_message->content_line->SetCRLFAsEOL(0); + } + // The presence of a message-body in a request is signaled by // the inclusion of a Content-Length or Transfer-Encoding // header field in the request's message-headers. 
@@ -542,12 +559,9 @@ HTTP_Message::HTTP_Message(HTTP_Analyzer* arg_analyzer, current_entity = 0; top_level = new HTTP_Entity(this, 0, expect_body); + entity_data_buffer = 0; BeginEntity(top_level); - buffer_offset = buffer_size = 0; - data_buffer = 0; - total_buffer_size = 0; - start_time = network_time; body_length = 0; content_gap_length = 0; @@ -557,6 +571,7 @@ HTTP_Message::HTTP_Message(HTTP_Analyzer* arg_analyzer, HTTP_Message::~HTTP_Message() { delete top_level; + delete [] entity_data_buffer; } Val* HTTP_Message::BuildMessageStat(const int interrupted, const char* msg) @@ -604,22 +619,14 @@ void HTTP_Message::Done(const int interrupted, const char* detail) } MyHTTP_Analyzer()->HTTP_MessageDone(is_orig, this); - - delete_strings(buffers); - - if ( data_buffer ) - { - delete data_buffer; - data_buffer = 0; - } } int HTTP_Message::Undelivered(int64_t len) { - if ( ! top_level ) - return 0; + HTTP_Entity* e = current_entity ? current_entity + : static_cast(top_level); - if ( ((HTTP_Entity*) top_level)->Undelivered(len) ) + if ( e && e->Undelivered(len) ) { content_gap_length += len; return 1; @@ -652,8 +659,6 @@ void HTTP_Message::EndEntity(mime::MIME_Entity* entity) body_length += ((HTTP_Entity*) entity)->BodyLength(); header_length += ((HTTP_Entity*) entity)->HeaderLength(); - DeliverEntityData(); - if ( http_end_entity ) { val_list* vl = new val_list(); @@ -664,6 +669,13 @@ void HTTP_Message::EndEntity(mime::MIME_Entity* entity) current_entity = (HTTP_Entity*) entity->Parent(); + if ( entity->Parent() && + entity->Parent()->MIMEContentType() == mime::CONTENT_TYPE_MULTIPART ) + { + content_line->SupressWeirds(false); + content_line->SetCRLFAsEOL(); + } + // It is necessary to call Done when EndEntity is triggered by // SubmitAllHeaders (through EndOfData). 
if ( entity == top_level ) @@ -720,31 +732,18 @@ void HTTP_Message::SubmitTrailingHeaders(mime::MIME_HeaderList& /* hlist */) void HTTP_Message::SubmitData(int len, const char* buf) { - if ( buf != (const char*) data_buffer->Bytes() + buffer_offset || - buffer_offset + len > buffer_size ) - { - reporter->AnalyzerError(MyHTTP_Analyzer(), - "HTTP message buffer misalignment"); - return; - } - - buffer_offset += len; - if ( buffer_offset >= buffer_size ) - { - buffers.push_back(data_buffer); - data_buffer = 0; - } + if ( http_entity_data ) + MyHTTP_Analyzer()->HTTP_EntityData(is_orig, + new BroString(reinterpret_cast(buf), len, 0)); } int HTTP_Message::RequestBuffer(int* plen, char** pbuf) { - if ( ! data_buffer ) - if ( ! InitBuffer(mime_segment_length) ) - return 0; - - *plen = data_buffer->Len() - buffer_offset; - *pbuf = (char*) data_buffer->Bytes() + buffer_offset; + if ( ! entity_data_buffer ) + entity_data_buffer = new char[http_entity_data_delivery_size]; + *plen = http_entity_data_delivery_size; + *pbuf = entity_data_buffer; return 1; } @@ -785,9 +784,6 @@ void HTTP_Message::SetPlainDelivery(int64_t length) if ( length > 0 && BifConst::skip_http_data ) content_line->SkipBytesAfterThisLine(length); - - if ( ! data_buffer ) - InitBuffer(length); } void HTTP_Message::SkipEntityData() @@ -796,87 +792,6 @@ void HTTP_Message::SkipEntityData() current_entity->SkipBody(); } -void HTTP_Message::DeliverEntityData() - { - if ( http_entity_data ) - { - const BroString* entity_data = 0; - - if ( data_buffer && buffer_offset > 0 ) - { - if ( buffer_offset < buffer_size ) - { - entity_data = new BroString(data_buffer->Bytes(), buffer_offset, 0); - delete data_buffer; - } - else - entity_data = data_buffer; - - data_buffer = 0; - - if ( buffers.empty() ) - MyHTTP_Analyzer()->HTTP_EntityData(is_orig, - entity_data); - else - buffers.push_back(entity_data); - - entity_data = 0; - } - - if ( ! 
buffers.empty() ) - { - if ( buffers.size() == 1 ) - { - entity_data = buffers[0]; - buffers.clear(); - } - else - { - entity_data = concatenate(buffers); - delete_strings(buffers); - } - - MyHTTP_Analyzer()->HTTP_EntityData(is_orig, entity_data); - } - } - else - { - delete_strings(buffers); - - if ( data_buffer ) - delete data_buffer; - - data_buffer = 0; - } - - total_buffer_size = 0; - } - -int HTTP_Message::InitBuffer(int64_t length) - { - if ( length <= 0 ) - return 0; - - if ( total_buffer_size >= http_entity_data_delivery_size ) - DeliverEntityData(); - - if ( total_buffer_size + length > http_entity_data_delivery_size ) - { - length = http_entity_data_delivery_size - total_buffer_size; - if ( length <= 0 ) - return 0; - } - - u_char* b = new u_char[length]; - data_buffer = new BroString(0, b, length); - - buffer_size = length; - total_buffer_size += length; - buffer_offset = 0; - - return 1; - } - void HTTP_Message::Weird(const char* msg) { analyzer->Weird(msg); @@ -1823,7 +1738,7 @@ void HTTP_Analyzer::ParseVersion(data_chunk_t ver, const IPAddr& host, } } -void HTTP_Analyzer::HTTP_EntityData(int is_orig, const BroString* entity_data) +void HTTP_Analyzer::HTTP_EntityData(int is_orig, BroString* entity_data) { if ( http_entity_data ) { @@ -1831,8 +1746,7 @@ void HTTP_Analyzer::HTTP_EntityData(int is_orig, const BroString* entity_data) vl->append(BuildConnVal()); vl->append(new Val(is_orig, TYPE_BOOL)); vl->append(new Val(entity_data->Len(), TYPE_COUNT)); - // FIXME: Make sure that removing the const here is indeed ok... 
- vl->append(new StringVal(const_cast(entity_data))); + vl->append(new StringVal(entity_data)); ConnectionEvent(http_entity_data, vl); } else diff --git a/src/analyzer/protocol/http/HTTP.h b/src/analyzer/protocol/http/HTTP.h index 5785d93198..d55c10c4c1 100644 --- a/src/analyzer/protocol/http/HTTP.h +++ b/src/analyzer/protocol/http/HTTP.h @@ -99,6 +99,8 @@ enum { // HTTP_MessageDone -> {Request,Reply}Made class HTTP_Message : public mime::MIME_Message { +friend class HTTP_Entity; + public: HTTP_Message(HTTP_Analyzer* analyzer, tcp::ContentLine_Analyzer* cl, bool is_orig, int expect_body, int64_t init_header_length); @@ -132,13 +134,7 @@ protected: tcp::ContentLine_Analyzer* content_line; bool is_orig; - vector buffers; - - // Controls the total buffer size within http_entity_data_delivery_size. - int total_buffer_size; - - int buffer_offset, buffer_size; - BroString* data_buffer; + char* entity_data_buffer; double start_time; @@ -151,9 +147,6 @@ protected: HTTP_Entity* current_entity; - int InitBuffer(int64_t length); - void DeliverEntityData(); - Val* BuildMessageStat(const int interrupted, const char* msg); }; @@ -165,7 +158,7 @@ public: void Undelivered(tcp::TCP_Endpoint* sender, uint64 seq, int len); void HTTP_Header(int is_orig, mime::MIME_Header* h); - void HTTP_EntityData(int is_orig, const BroString* entity_data); + void HTTP_EntityData(int is_orig, BroString* entity_data); void HTTP_MessageDone(int is_orig, HTTP_Message* message); void HTTP_Event(const char* category, const char* detail); void HTTP_Event(const char* category, StringVal *detail); diff --git a/src/analyzer/protocol/mime/MIME.cc b/src/analyzer/protocol/mime/MIME.cc index 6f992c9256..a1759d97d0 100644 --- a/src/analyzer/protocol/mime/MIME.cc +++ b/src/analyzer/protocol/mime/MIME.cc @@ -142,8 +142,9 @@ int fputs(data_chunk_t b, FILE* fp) void MIME_Mail::Undelivered(int len) { // is_orig param not available, doesn't matter as long as it's consistent - file_mgr->Gap(cur_entity_len, len, 
analyzer->GetAnalyzerTag(), analyzer->Conn(), - false); + cur_entity_id = file_mgr->Gap(cur_entity_len, len, + analyzer->GetAnalyzerTag(), analyzer->Conn(), + false, cur_entity_id); } int strcasecmp_n(data_chunk_t s, const char* t) @@ -552,6 +553,7 @@ void MIME_Entity::init() data_buf_offset = -1; message = 0; + delay_adding_implicit_CRLF = false; } MIME_Entity::~MIME_Entity() @@ -643,11 +645,7 @@ void MIME_Entity::EndOfData() if ( content_encoding == CONTENT_ENCODING_BASE64 ) FinishDecodeBase64(); - if ( data_buf_offset > 0 ) - { - SubmitData(data_buf_offset, data_buf_data); - data_buf_offset = -1; - } + FlushData(); } message->EndEntity (this); @@ -1001,16 +999,38 @@ void MIME_Entity::DecodeDataLine(int len, const char* data, int trailing_CRLF) DecodeBinary(len, data, trailing_CRLF); break; } + FlushData(); } void MIME_Entity::DecodeBinary(int len, const char* data, int trailing_CRLF) { + if ( delay_adding_implicit_CRLF ) + { + delay_adding_implicit_CRLF = false; + DataOctet(CR); + DataOctet(LF); + } + DataOctets(len, data); if ( trailing_CRLF ) { - DataOctet(CR); - DataOctet(LF); + if ( Parent() && + Parent()->MIMEContentType() == mime::CONTENT_TYPE_MULTIPART ) + { + // For multipart body content, we want to keep all implicit CRLFs + // except for the last because that one belongs to the multipart + // boundary delimiter, not the content. Simply delaying the + // addition of implicit CRLFs until another chunk of content + // data comes in is a way to prevent the CRLF before the final + // message boundary from being accidentally added to the content. 
+ delay_adding_implicit_CRLF = true; + } + else + { + DataOctet(CR); + DataOctet(LF); + } } } @@ -1179,6 +1199,15 @@ void MIME_Entity::DataOctets(int len, const char* data) } } +void MIME_Entity::FlushData() + { + if ( data_buf_offset > 0 ) + { + SubmitData(data_buf_offset, data_buf_data); + data_buf_offset = -1; + } + } + void MIME_Entity::SubmitHeader(MIME_Header* h) { message->SubmitHeader(h); @@ -1325,6 +1354,7 @@ MIME_Mail::~MIME_Mail() void MIME_Mail::BeginEntity(MIME_Entity* /* entity */) { cur_entity_len = 0; + cur_entity_id.clear(); if ( mime_begin_entity ) { @@ -1364,6 +1394,7 @@ void MIME_Mail::EndEntity(MIME_Entity* /* entity */) } file_mgr->EndOfFile(analyzer->GetAnalyzerTag(), analyzer->Conn()); + cur_entity_id.clear(); } void MIME_Mail::SubmitHeader(MIME_Header* h) @@ -1426,8 +1457,9 @@ void MIME_Mail::SubmitData(int len, const char* buf) } // is_orig param not available, doesn't matter as long as it's consistent - file_mgr->DataIn(reinterpret_cast(buf), len, - analyzer->GetAnalyzerTag(), analyzer->Conn(), false); + cur_entity_id = file_mgr->DataIn(reinterpret_cast(buf), len, + analyzer->GetAnalyzerTag(), analyzer->Conn(), false, + cur_entity_id); cur_entity_len += len; buffer_start = (buf + len) - (char*)data_buffer->Bytes(); diff --git a/src/analyzer/protocol/mime/MIME.h b/src/analyzer/protocol/mime/MIME.h index 2b2f88105d..a3ee45d071 100644 --- a/src/analyzer/protocol/mime/MIME.h +++ b/src/analyzer/protocol/mime/MIME.h @@ -133,6 +133,7 @@ protected: int GetDataBuffer(); void DataOctet(char ch); void DataOctets(int len, const char* data); + void FlushData(); virtual void SubmitData(int len, const char* buf); virtual void SubmitHeader(MIME_Header* h); @@ -172,6 +173,7 @@ protected: int data_buf_offset; MIME_Message* message; + bool delay_adding_implicit_CRLF; }; // The reason I separate MIME_Message as an abstract class is to @@ -257,6 +259,7 @@ protected: BroString* data_buffer; uint64 cur_entity_len; + string cur_entity_id; }; diff --git 
a/src/analyzer/protocol/tcp/ContentLine.cc b/src/analyzer/protocol/tcp/ContentLine.cc index 72314dd45d..f5dd7aaf07 100644 --- a/src/analyzer/protocol/tcp/ContentLine.cc +++ b/src/analyzer/protocol/tcp/ContentLine.cc @@ -32,6 +32,7 @@ void ContentLine_Analyzer::InitState() seq_to_skip = 0; plain_delivery_length = 0; is_plain = 0; + suppress_weirds = false; InitBuffer(0); } @@ -258,7 +259,7 @@ int ContentLine_Analyzer::DoDeliverOnce(int len, const u_char* data) else { - if ( Conn()->FlagEvent(SINGULAR_LF) ) + if ( ! suppress_weirds && Conn()->FlagEvent(SINGULAR_LF) ) Conn()->Weird("line_terminated_with_single_LF"); buf[offset++] = c; } @@ -277,7 +278,7 @@ int ContentLine_Analyzer::DoDeliverOnce(int len, const u_char* data) } if ( last_char == '\r' ) - if ( Conn()->FlagEvent(SINGULAR_CR) ) + if ( ! suppress_weirds && Conn()->FlagEvent(SINGULAR_CR) ) Conn()->Weird("line_terminated_with_single_CR"); last_char = c; @@ -307,7 +308,7 @@ void ContentLine_Analyzer::CheckNUL() ; // Ignore it. else { - if ( Conn()->FlagEvent(NUL_IN_LINE) ) + if ( ! suppress_weirds && Conn()->FlagEvent(NUL_IN_LINE) ) Conn()->Weird("NUL_in_line"); flag_NULs = 0; } diff --git a/src/analyzer/protocol/tcp/ContentLine.h b/src/analyzer/protocol/tcp/ContentLine.h index 93c473c47c..7a5a6b996e 100644 --- a/src/analyzer/protocol/tcp/ContentLine.h +++ b/src/analyzer/protocol/tcp/ContentLine.h @@ -15,6 +15,9 @@ public: ContentLine_Analyzer(Connection* conn, bool orig); ~ContentLine_Analyzer(); + void SupressWeirds(bool enable) + { suppress_weirds = enable; } + // If enabled, flag (first) line with embedded NUL. Default off. void SetIsNULSensitive(bool enable) { flag_NULs = enable; } @@ -96,6 +99,8 @@ protected: // Don't deliver further data. int skip_deliveries; + bool suppress_weirds; + // If true, flag (first) line with embedded NUL. 
unsigned int flag_NULs:1; diff --git a/src/analyzer/protocol/tcp/TCP_Reassembler.cc b/src/analyzer/protocol/tcp/TCP_Reassembler.cc index 053e8c8f60..0f7699011e 100644 --- a/src/analyzer/protocol/tcp/TCP_Reassembler.cc +++ b/src/analyzer/protocol/tcp/TCP_Reassembler.cc @@ -117,6 +117,45 @@ void TCP_Reassembler::SetContentsFile(BroFile* f) record_contents_file = f; } +static inline bool established(const TCP_Endpoint* a, const TCP_Endpoint* b) + { + return a->state == TCP_ENDPOINT_ESTABLISHED && + b->state == TCP_ENDPOINT_ESTABLISHED; + } + +static inline bool report_gap(const TCP_Endpoint* a, const TCP_Endpoint* b) + { + return content_gap && + ( BifConst::report_gaps_for_partial || established(a, b) ); + } + +void TCP_Reassembler::Gap(uint64 seq, uint64 len) + { + // Only report on content gaps for connections that + // are in a cleanly established state. In other + // states, these can arise falsely due to things + // like sequence number mismatches in RSTs, or + // unseen previous packets in partial connections. + // The one opportunity we lose here is on clean FIN + // handshakes, but Oh Well. + + if ( report_gap(endp, endp->peer) ) + { + val_list* vl = new val_list; + vl->append(dst_analyzer->BuildConnVal()); + vl->append(new Val(IsOrig(), TYPE_BOOL)); + vl->append(new Val(seq, TYPE_COUNT)); + vl->append(new Val(len, TYPE_COUNT)); + dst_analyzer->ConnectionEvent(content_gap, vl); + } + + if ( type == Direct ) + dst_analyzer->NextUndelivered(seq, len, IsOrig()); + else + dst_analyzer->ForwardUndelivered(seq, len, IsOrig()); + + had_gap = true; + } void TCP_Reassembler::Undelivered(uint64 up_to_seq) { @@ -189,48 +228,35 @@ void TCP_Reassembler::Undelivered(uint64 up_to_seq) if ( ! skip_deliveries ) { - // This can happen because we're processing a trace - // that's been filtered. For example, if it's just - // SYN/FIN data, then there can be data in the FIN - // packet, but it's undelievered because it's out of - // sequence. 
- - uint64 seq = last_reassem_seq; - uint64 len = up_to_seq - last_reassem_seq; - - // Only report on content gaps for connections that - // are in a cleanly established state. In other - // states, these can arise falsely due to things - // like sequence number mismatches in RSTs, or - // unseen previous packets in partial connections. - // The one opportunity we lose here is on clean FIN - // handshakes, but Oh Well. - - if ( content_gap && - (BifConst::report_gaps_for_partial || - (endpoint->state == TCP_ENDPOINT_ESTABLISHED && - peer->state == TCP_ENDPOINT_ESTABLISHED ) ) ) + // If we have blocks that begin below up_to_seq, deliver them. + DataBlock* b = blocks; + while ( b ) { - val_list* vl = new val_list; - vl->append(dst_analyzer->BuildConnVal()); - vl->append(new Val(IsOrig(), TYPE_BOOL)); - vl->append(new Val(seq, TYPE_COUNT)); - vl->append(new Val(len, TYPE_COUNT)); + if ( b->seq < last_reassem_seq ) + { + // Already delivered this block. + b = b->next; + continue; + } - dst_analyzer->ConnectionEvent(content_gap, vl); + if ( b->seq >= up_to_seq ) + // Block is beyond what we need to process at this point. + break; + + uint64 gap_at_seq = last_reassem_seq; + uint64 gap_len = b->seq - last_reassem_seq; + + Gap(gap_at_seq, gap_len); + last_reassem_seq += gap_len; + BlockInserted(b); + // Inserting a block may cause trimming of what's buffered, + // so have to assume 'b' is invalid, hence re-assign to start. + b = blocks; } - if ( type == Direct ) - dst_analyzer->NextUndelivered(last_reassem_seq, - len, IsOrig()); - else - { - dst_analyzer->ForwardUndelivered(last_reassem_seq, - len, IsOrig()); - } + if ( up_to_seq > last_reassem_seq ) + Gap(last_reassem_seq, up_to_seq - last_reassem_seq); } - - had_gap = true; } // We should record and match undelivered even if we are skipping @@ -243,7 +269,8 @@ void TCP_Reassembler::Undelivered(uint64 up_to_seq) MatchUndelivered(up_to_seq, false); // But we need to re-adjust last_reassem_seq in either case. 
- last_reassem_seq = up_to_seq; // we've done our best ... + if ( up_to_seq > last_reassem_seq ) + last_reassem_seq = up_to_seq; // we've done our best ... } void TCP_Reassembler::MatchUndelivered(uint64 up_to_seq, bool use_last_upper) diff --git a/src/analyzer/protocol/tcp/TCP_Reassembler.h b/src/analyzer/protocol/tcp/TCP_Reassembler.h index 3dfe75bf10..5d8badcef1 100644 --- a/src/analyzer/protocol/tcp/TCP_Reassembler.h +++ b/src/analyzer/protocol/tcp/TCP_Reassembler.h @@ -94,6 +94,7 @@ private: DECLARE_SERIAL(TCP_Reassembler); void Undelivered(uint64 up_to_seq); + void Gap(uint64 seq, uint64 len); void RecordToSeq(uint64 start_seq, uint64 stop_seq, BroFile* f); void RecordBlock(DataBlock* b, BroFile* f); diff --git a/src/bro.bif b/src/bro.bif index 1029896295..1757a9d12e 100644 --- a/src/bro.bif +++ b/src/bro.bif @@ -21,6 +21,7 @@ #include "IPAddr.h" #include "util.h" #include "file_analysis/Manager.h" +#include "iosource/Manager.h" using namespace std; @@ -33,7 +34,7 @@ TableType* var_sizes; // and hence it's declared in NetVar.{h,cc}. extern RecordType* gap_info; -static PktDumper* addl_pkt_dumper = 0; +static iosource::PktDumper* addl_pkt_dumper = 0; bro_int_t parse_int(const char*& fmt) { @@ -1675,11 +1676,14 @@ function net_stats%(%): NetStats unsigned int drop = 0; unsigned int link = 0; - loop_over_list(pkt_srcs, i) - { - PktSrc* ps = pkt_srcs[i]; + const iosource::Manager::PktSrcList& pkt_srcs(iosource_mgr->GetPktSrcs()); - struct PktSrc::Stats stat; + for ( iosource::Manager::PktSrcList::const_iterator i = pkt_srcs.begin(); + i != pkt_srcs.end(); i++ ) + { + iosource::PktSrc* ps = *i; + + struct iosource::PktSrc::Stats stat; ps->Statistics(&stat); recv += stat.received; drop += stat.dropped; @@ -3224,10 +3228,15 @@ function dump_current_packet%(file_name: string%) : bool return new Val(0, TYPE_BOOL); if ( ! 
addl_pkt_dumper ) - addl_pkt_dumper = new PktDumper(0, true); + addl_pkt_dumper = iosource_mgr->OpenPktDumper(file_name->CheckString(), true); - addl_pkt_dumper->Open(file_name->CheckString()); - addl_pkt_dumper->Dump(hdr, pkt); + if ( addl_pkt_dumper ) + { + iosource::PktDumper::Packet p; + p.hdr = hdr; + p.data = pkt; + addl_pkt_dumper->Dump(&p); + } return new Val(! addl_pkt_dumper->IsError(), TYPE_BOOL); %} @@ -3284,10 +3293,15 @@ function dump_packet%(pkt: pcap_packet, file_name: string%) : bool hdr.len = (*pkt_vl)[3]->AsCount(); if ( ! addl_pkt_dumper ) - addl_pkt_dumper = new PktDumper(0, true); + addl_pkt_dumper = iosource_mgr->OpenPktDumper(file_name->CheckString(), true); - addl_pkt_dumper->Open(file_name->CheckString()); - addl_pkt_dumper->Dump(&hdr, (*pkt_vl)[4]->AsString()->Bytes()); + if ( addl_pkt_dumper ) + { + iosource::PktDumper::Packet p; + p.hdr = &hdr; + p.data = (*pkt_vl)[4]->AsString()->Bytes(); + addl_pkt_dumper->Dump(&p); + } return new Val(addl_pkt_dumper->IsError(), TYPE_BOOL); %} @@ -4110,14 +4124,14 @@ function rotate_file_by_name%(f: string%): rotate_info bool is_addl_pkt_dumper = false; // Special case: one of current dump files. - if ( pkt_dumper && streq(pkt_dumper->FileName(), f->CheckString()) ) + if ( pkt_dumper && streq(pkt_dumper->Path().c_str(), f->CheckString()) ) { is_pkt_dumper = true; pkt_dumper->Close(); } if ( addl_pkt_dumper && - streq(addl_pkt_dumper->FileName(), f->CheckString()) ) + streq(addl_pkt_dumper->Path().c_str(), f->CheckString()) ) { is_addl_pkt_dumper = true; addl_pkt_dumper->Close(); @@ -4214,103 +4228,6 @@ function enable_raw_output%(f: file%): any # # =========================================================================== -## Precompiles a PCAP filter and binds it to a given identifier. -## -## id: The PCAP identifier to reference the filter *s* later on. -## -## s: The PCAP filter. See ``man tcpdump`` for valid expressions. -## -## Returns: True if *s* is valid and precompiles successfully. 
-## -## .. bro:see:: install_pcap_filter -## install_src_addr_filter -## install_src_net_filter -## uninstall_src_addr_filter -## uninstall_src_net_filter -## install_dst_addr_filter -## install_dst_net_filter -## uninstall_dst_addr_filter -## uninstall_dst_net_filter -## pcap_error -function precompile_pcap_filter%(id: PcapFilterID, s: string%): bool - %{ - bool success = true; - - loop_over_list(pkt_srcs, i) - { - pkt_srcs[i]->ClearErrorMsg(); - - if ( ! pkt_srcs[i]->PrecompileFilter(id->ForceAsInt(), - s->CheckString()) ) - { - reporter->Error("precompile_pcap_filter: %s", - pkt_srcs[i]->ErrorMsg()); - success = false; - } - } - - return new Val(success, TYPE_BOOL); - %} - -## Installs a PCAP filter that has been precompiled with -## :bro:id:`precompile_pcap_filter`. -## -## id: The PCAP filter id of a precompiled filter. -## -## Returns: True if the filter associated with *id* has been installed -## successfully. -## -## .. bro:see:: precompile_pcap_filter -## install_src_addr_filter -## install_src_net_filter -## uninstall_src_addr_filter -## uninstall_src_net_filter -## install_dst_addr_filter -## install_dst_net_filter -## uninstall_dst_addr_filter -## uninstall_dst_net_filter -## pcap_error -function install_pcap_filter%(id: PcapFilterID%): bool - %{ - bool success = true; - - loop_over_list(pkt_srcs, i) - { - pkt_srcs[i]->ClearErrorMsg(); - - if ( ! pkt_srcs[i]->SetFilter(id->ForceAsInt()) ) - success = false; - } - - return new Val(success, TYPE_BOOL); - %} - -## Returns a string representation of the last PCAP error. -## -## Returns: A descriptive error message of the PCAP function that failed. -## -## .. 
bro:see:: precompile_pcap_filter -## install_pcap_filter -## install_src_addr_filter -## install_src_net_filter -## uninstall_src_addr_filter -## uninstall_src_net_filter -## install_dst_addr_filter -## install_dst_net_filter -## uninstall_dst_addr_filter -## uninstall_dst_net_filter -function pcap_error%(%): string - %{ - loop_over_list(pkt_srcs, i) - { - const char* err = pkt_srcs[i]->ErrorMsg(); - if ( *err ) - return new StringVal(err); - } - - return new StringVal("no error"); - %} - ## Installs a filter to drop packets from a given IP source address with ## a certain probability if none of a given set of TCP flags are set. ## Note that for IPv6 packets with a Destination options header that has @@ -4542,7 +4459,7 @@ function enable_communication%(%): any return 0; using_communication = 1; - remote_serializer->Init(); + remote_serializer->Enable(); return 0; %} diff --git a/src/broxygen/Manager.cc b/src/broxygen/Manager.cc index 2b1159a3a0..3a07191f33 100644 --- a/src/broxygen/Manager.cc +++ b/src/broxygen/Manager.cc @@ -16,6 +16,16 @@ static void DbgAndWarn(const char* msg) DBG_LOG(DBG_BROXYGEN, "%s", msg); } +static void WarnMissingScript(const char* type, const ID* id, + string script) + { + if ( script == "" ) + return; + + DbgAndWarn(fmt("Can't document %s %s, lookup of %s failed", + type, id->Name(), script.c_str())); + } + static string RemoveLeadingSpace(const string& s) { if ( s.empty() || s[0] != ' ' ) @@ -220,8 +230,7 @@ void Manager::StartType(ID* id) if ( ! script_info ) { - DbgAndWarn(fmt("Can't document identifier %s, lookup of %s failed", - id->Name(), script.c_str())); + WarnMissingScript("identifier", id, script); return; } @@ -285,8 +294,7 @@ void Manager::Identifier(ID* id) if ( ! script_info ) { - DbgAndWarn(fmt("Can't document identifier %s, lookup of %s failed", - id->Name(), script.c_str())); + WarnMissingScript("identifier", id, script); return; } @@ -340,8 +348,7 @@ void Manager::Redef(const ID* id, const string& path) if ( ! 
script_info ) { - DbgAndWarn(fmt("Can't document redef of %s, lookup of %s failed", - id->Name(), from_script.c_str())); + WarnMissingScript("redef", id, from_script); return; } diff --git a/src/file_analysis/File.cc b/src/file_analysis/File.cc index 50d7d48336..4509fc7d42 100644 --- a/src/file_analysis/File.cc +++ b/src/file_analysis/File.cc @@ -437,7 +437,7 @@ void File::EndOfFile() void File::Gap(uint64 offset, uint64 len) { DBG_LOG(DBG_FILE_ANALYSIS, "[%s] Gap of size %" PRIu64 " at offset %" PRIu64, - id.c_str(), offset, len); + id.c_str(), len, offset); analyzers.DrainModifications(); diff --git a/src/file_analysis/Manager.cc b/src/file_analysis/Manager.cc index 2a96315dbb..59c0fa0023 100644 --- a/src/file_analysis/Manager.cc +++ b/src/file_analysis/Manager.cc @@ -22,7 +22,7 @@ string Manager::salt; Manager::Manager() : plugin::ComponentManager("Files"), + file_analysis::Component>("Files", "Tag"), id_map(), ignored(), current_file_id(), magic_state() { } diff --git a/src/file_analysis/analyzer/x509/functions.bif b/src/file_analysis/analyzer/x509/functions.bif index 9a8a8e78b7..216f4c69cc 100644 --- a/src/file_analysis/analyzer/x509/functions.bif +++ b/src/file_analysis/analyzer/x509/functions.bif @@ -104,6 +104,39 @@ STACK_OF(X509)* x509_get_untrusted_stack(VectorVal* certs_vec) return untrusted_certs; } +// We need this function to be able to identify the signer certificate of an +// OCSP request out of a list of possible certificates. +X509* x509_get_ocsp_signer(STACK_OF(X509) *certs, OCSP_RESPID *rid) + { + // We support two lookup types - either by response id or by key. + if ( rid->type == V_OCSP_RESPID_NAME ) + return X509_find_by_subject(certs, rid->value.byName); + + // There only should be name and type - but let's be sure... + if ( rid->type != V_OCSP_RESPID_KEY ) + return 0; + + // Just like OpenSSL, we just support SHA-1 lookups and bail out otherwhise. 
+ if ( rid->value.byKey->length != SHA_DIGEST_LENGTH ) + return 0; + + unsigned char* key_hash = rid->value.byKey->data; + for ( int i = 0; i < sk_X509_num(certs); ++i ) + { + unsigned char digest[SHA_DIGEST_LENGTH]; + X509* cert = sk_X509_value(certs, i); + if ( ! X509_pubkey_digest(cert, EVP_sha1(), digest, NULL) ) + // digest failed for this certificate, try with next + continue; + + if ( memcmp(digest, key_hash, SHA_DIGEST_LENGTH) == 0 ) + // keys match, return certificate + return cert; + } + + return 0; + } + %%} ## Parses a certificate into an X509::Certificate structure. @@ -221,6 +254,7 @@ function x509_ocsp_verify%(certs: x509_opaque_vector, ocsp_reply: string, root_c int out = -1; int result = -1; X509* issuer_certificate = 0; + X509* signer = 0; OCSP_RESPONSE *resp = d2i_OCSP_RESPONSE(NULL, &start, ocsp_reply->Len()); if ( ! resp ) { @@ -250,19 +284,47 @@ function x509_ocsp_verify%(certs: x509_opaque_vector, ocsp_reply: string, root_c // inject the certificates in the certificate list of the OCSP reply, they actually are used during // the lookup. // Yay. + + if ( ! basic->certs ) + { + basic->certs = sk_X509_new_null(); + if ( ! basic->certs ) + { + rval = x509_result_record(-1, "Could not allocate basic x509 stack"); + goto x509_ocsp_cleanup; + } + } + issuer_certificate = 0; for ( int i = 0; i < sk_X509_num(untrusted_certs); i++) { sk_X509_push(basic->certs, X509_dup(sk_X509_value(untrusted_certs, i))); - if ( X509_NAME_cmp(X509_get_issuer_name(cert), X509_get_subject_name(sk_X509_value(untrusted_certs, i))) ) + if ( X509_NAME_cmp(X509_get_issuer_name(cert), X509_get_subject_name(sk_X509_value(untrusted_certs, i))) == 0 ) issuer_certificate = sk_X509_value(untrusted_certs, i); } // Because we actually want to be able to give nice error messages that show why we were // not able to verify the OCSP response - do our own verification logic first. 
+ signer = x509_get_ocsp_signer(basic->certs, basic->tbsResponseData->responderId); + + /* + Do this perhaps - OpenSSL also cannot do it, so I do not really feel bad about it. + Needs a different lookup because the root store is no stack of X509 certs + + if ( !s igner ) + // if we did not find it in the certificates that were sent, search in the root store + signer = x509_get_ocsp_signer(basic->certs, basic->tbsResponseData->responderId); + */ + + if ( ! signer ) + { + rval = x509_result_record(-1, "Could not find OCSP responder certificate"); + goto x509_ocsp_cleanup; + } + csc = X509_STORE_CTX_new(); - X509_STORE_CTX_init(csc, ctx, sk_X509_value(basic->certs, 0), basic->certs); + X509_STORE_CTX_init(csc, ctx, signer, basic->certs); X509_STORE_CTX_set_time(csc, 0, (time_t) verify_time); X509_STORE_CTX_set_purpose(csc, X509_PURPOSE_OCSP_HELPER); @@ -281,7 +343,6 @@ function x509_ocsp_verify%(certs: x509_opaque_vector, ocsp_reply: string, root_c goto x509_ocsp_cleanup; } - // ok, now we verified the OCSP response. This means that we have a valid chain tying it // to a root that we trust and that the signature also hopefully is valid. This does not yet // mean that the ocsp response actually matches the certificate the server send us or that @@ -322,7 +383,7 @@ function x509_ocsp_verify%(certs: x509_opaque_vector, ocsp_reply: string, root_c goto x509_ocsp_cleanup; } - if ( ! OCSP_id_cmp(certid, single->certId) ) + if ( OCSP_id_cmp(certid, single->certId) != 0 ) return x509_result_record(-1, "OCSP reply is not for host certificate"); // next - check freshness of proof... 
diff --git a/src/input/CMakeLists.txt b/src/input/CMakeLists.txt new file mode 100644 index 0000000000..b1c79d2bd0 --- /dev/null +++ b/src/input/CMakeLists.txt @@ -0,0 +1,23 @@ + +include(BroSubdir) + +include_directories(BEFORE + ${CMAKE_CURRENT_SOURCE_DIR} + ${CMAKE_CURRENT_BINARY_DIR} +) + +add_subdirectory(readers) + +set(input_SRCS + Component.cc + Manager.cc + ReaderBackend.cc + ReaderFrontend.cc + Tag.cc +) + +bif_target(input.bif) + +bro_add_subdir_library(input ${input_SRCS} ${BIF_OUTPUT_CC}) +add_dependencies(bro_input generate_outputs) + diff --git a/src/input/Component.cc b/src/input/Component.cc new file mode 100644 index 0000000000..fd70c76216 --- /dev/null +++ b/src/input/Component.cc @@ -0,0 +1,28 @@ +// See the file "COPYING" in the main distribution directory for copyright. + +#include "Component.h" +#include "Manager.h" + +#include "../Desc.h" +#include "../util.h" + +using namespace input; + +Component::Component(const std::string& name, factory_callback arg_factory) + : plugin::Component(plugin::component::WRITER, name) + { + factory = arg_factory; + + input_mgr->RegisterComponent(this, "READER_"); + } + +Component::~Component() + { + } + +void Component::DoDescribe(ODesc* d) const + { + d->Add("Input::READER_"); + d->Add(CanonicalName()); + } + diff --git a/src/input/Component.h b/src/input/Component.h new file mode 100644 index 0000000000..0812aa63cf --- /dev/null +++ b/src/input/Component.h @@ -0,0 +1,59 @@ +// See the file "COPYING" in the main distribution directory for copyright. + +#ifndef INPUT_COMPONENT_H +#define INPUT_COMPONENT_H + +#include "Tag.h" +#include "plugin/Component.h" +#include "plugin/TaggedComponent.h" + +namespace input { + +class ReaderFrontend; +class ReaderBackend; + +/** + * Component description for plugins providing log readers. 
+ */ +class Component : public plugin::Component, + public plugin::TaggedComponent { +public: + typedef ReaderBackend* (*factory_callback)(ReaderFrontend* frontend); + + /** + * Constructor. + * + * @param name The name of the provided reader. This name is used + * across the system to identify the reader. + * + * @param factory A factory function to instantiate instances of the + * readers's class, which must be derived directly or indirectly from + * input::ReaderBackend. This is typically a static \c Instatiate() + * method inside the class that just allocates and returns a new + * instance. + */ + Component(const std::string& name, factory_callback factory); + + /** + * Destructor. + */ + ~Component(); + + /** + * Returns the reader's factory function. + */ + factory_callback Factory() const { return factory; } + +protected: + /** + * Overriden from plugin::Component. + */ + virtual void DoDescribe(ODesc* d) const; + +private: + factory_callback factory; +}; + +} + +#endif diff --git a/src/input/Manager.cc b/src/input/Manager.cc index 95983faf26..044f9fcae3 100644 --- a/src/input/Manager.cc +++ b/src/input/Manager.cc @@ -5,11 +5,7 @@ #include "Manager.h" #include "ReaderFrontend.h" #include "ReaderBackend.h" -#include "readers/Ascii.h" -#include "readers/Raw.h" -#include "readers/Benchmark.h" -#include "readers/Binary.h" -#include "readers/SQLite.h" +#include "input.bif.h" #include "Event.h" #include "EventHandler.h" @@ -24,24 +20,6 @@ using namespace input; using threading::Value; using threading::Field; -struct ReaderDefinition { - bro_int_t type; // The reader type. - const char *name; // Descriptive name for error messages. - bool (*init)(); // Optional one-time initializing function. - ReaderBackend* (*factory)(ReaderFrontend* frontend); // Factory function for creating instances. 
-}; - -ReaderDefinition input_readers[] = { - { BifEnum::Input::READER_ASCII, "Ascii", 0, reader::Ascii::Instantiate }, - { BifEnum::Input::READER_RAW, "Raw", 0, reader::Raw::Instantiate }, - { BifEnum::Input::READER_BENCHMARK, "Benchmark", 0, reader::Benchmark::Instantiate }, - { BifEnum::Input::READER_BINARY, "Binary", 0, reader::Binary::Instantiate }, - { BifEnum::Input::READER_SQLITE, "SQLite", 0, reader::SQLite::Instantiate }, - - // End marker - { BifEnum::Input::READER_DEFAULT, "None", 0, (ReaderBackend* (*)(ReaderFrontend* frontend))0 } -}; - static void delete_value_ptr_array(Value** vals, int num_fields) { for ( int i = 0; i < num_fields; ++i ) @@ -215,6 +193,7 @@ Manager::AnalysisStream::~AnalysisStream() } Manager::Manager() + : plugin::ComponentManager("Input", "Reader") { end_of_data = internal_handler("Input::end_of_data"); } @@ -229,55 +208,17 @@ Manager::~Manager() } -ReaderBackend* Manager::CreateBackend(ReaderFrontend* frontend, bro_int_t type) +ReaderBackend* Manager::CreateBackend(ReaderFrontend* frontend, EnumVal* tag) { - ReaderDefinition* ir = input_readers; + Component* c = Lookup(tag); - while ( true ) + if ( ! c ) { - if ( ir->type == BifEnum::Input::READER_DEFAULT ) - { - reporter->Error("The reader that was requested was not found and could not be initialized."); - return 0; - } - - if ( ir->type != type ) - { - // no, didn't find the right one... - ++ir; - continue; - } - - - // call init function of writer if presnt - if ( ir->init ) - { - if ( (*ir->init)() ) - { - //clear it to be not called again - ir->init = 0; - } - - else { - // ohok. init failed, kill factory for all eternity - ir->factory = 0; - DBG_LOG(DBG_LOGGING, "Failed to init input class %s", ir->name); - return 0; - } - - } - - if ( ! ir->factory ) - // no factory? - return 0; - - // all done. break. 
- break; + reporter->Error("The reader that was requested was not found and could not be initialized."); + return 0; } - assert(ir->factory); - - ReaderBackend* backend = (*ir->factory)(frontend); + ReaderBackend* backend = (*c->Factory())(frontend); assert(backend); return backend; @@ -286,8 +227,6 @@ ReaderBackend* Manager::CreateBackend(ReaderFrontend* frontend, bro_int_t type) // Create a new input reader object to be used at whomevers leisure lateron. bool Manager::CreateStream(Stream* info, RecordVal* description) { - ReaderDefinition* ir = input_readers; - RecordType* rtype = description->Type()->AsRecordType(); if ( ! ( same_type(rtype, BifType::Record::Input::TableDescription, 0) || same_type(rtype, BifType::Record::Input::EventDescription, 0) diff --git a/src/input/Manager.h b/src/input/Manager.h index 8156ed5248..cfac803129 100644 --- a/src/input/Manager.h +++ b/src/input/Manager.h @@ -10,6 +10,8 @@ #include "RemoteSerializer.h" #include "Val.h" +#include "Component.h" + #include namespace input { @@ -20,7 +22,7 @@ class ReaderBackend; /** * Singleton class for managing input streams. */ -class Manager { +class Manager : public plugin::ComponentManager { public: /** * Constructor. @@ -131,7 +133,7 @@ protected: // Instantiates a new ReaderBackend of the given type (note that // doing so creates a new thread!). - ReaderBackend* CreateBackend(ReaderFrontend* frontend, bro_int_t type); + ReaderBackend* CreateBackend(ReaderFrontend* frontend, EnumVal* tag); // Function called from the ReaderBackend to notify the manager that // a stream has been removed or a stream has been closed. 
Used to diff --git a/src/input/ReaderBackend.cc b/src/input/ReaderBackend.cc index 4c7540609c..72043c5932 100644 --- a/src/input/ReaderBackend.cc +++ b/src/input/ReaderBackend.cc @@ -156,7 +156,7 @@ public: } }; -using namespace logging; +using namespace input; ReaderBackend::ReaderBackend(ReaderFrontend* arg_frontend) : MsgThread() { diff --git a/src/input/ReaderBackend.h b/src/input/ReaderBackend.h index 84984a3ce5..e87789abbd 100644 --- a/src/input/ReaderBackend.h +++ b/src/input/ReaderBackend.h @@ -8,6 +8,8 @@ #include "threading/SerialTypes.h" #include "threading/MsgThread.h" +#include "Component.h" + namespace input { /** diff --git a/src/input/ReaderFrontend.cc b/src/input/ReaderFrontend.cc index d28f410de0..3852a1002a 100644 --- a/src/input/ReaderFrontend.cc +++ b/src/input/ReaderFrontend.cc @@ -44,7 +44,7 @@ ReaderFrontend::ReaderFrontend(const ReaderBackend::ReaderInfo& arg_info, EnumVa const char* t = type->Type()->AsEnumType()->Lookup(type->InternalInt()); name = copy_string(fmt("%s/%s", arg_info.source, t)); - backend = input_mgr->CreateBackend(this, type->InternalInt()); + backend = input_mgr->CreateBackend(this, type); assert(backend); backend->Start(); } diff --git a/src/input/Tag.cc b/src/input/Tag.cc new file mode 100644 index 0000000000..4f227f7799 --- /dev/null +++ b/src/input/Tag.cc @@ -0,0 +1,22 @@ +// See the file "COPYING" in the main distribution directory for copyright. 
+ +#include "Tag.h" +#include "Manager.h" + +input::Tag input::Tag::Error; + +input::Tag::Tag(type_t type, subtype_t subtype) + : ::Tag(input_mgr->GetTagEnumType(), type, subtype) + { + } + +input::Tag& input::Tag::operator=(const input::Tag& other) + { + ::Tag::operator=(other); + return *this; + } + +EnumVal* input::Tag::AsEnumVal() const + { + return ::Tag::AsEnumVal(input_mgr->GetTagEnumType()); + } diff --git a/src/input/Tag.h b/src/input/Tag.h new file mode 100644 index 0000000000..8188fbc294 --- /dev/null +++ b/src/input/Tag.h @@ -0,0 +1,116 @@ +// See the file "COPYING" in the main distribution directory for copyright. + +#ifndef INPUT_TAG_H +#define INPUT_TAG_H + +#include "config.h" +#include "util.h" +#include "../Tag.h" +#include "plugin/TaggedComponent.h" +#include "plugin/ComponentManager.h" + +class EnumVal; + +namespace input { + +class Manager; +class Component; + +/** + * Class to identify a reader type. + * + * The script-layer analogue is Input::Reader. + */ +class Tag : public ::Tag { +public: + /* + * Copy constructor. + */ + Tag(const Tag& other) : ::Tag(other) {} + + /** + * Default constructor. This initializes the tag with an error value + * that will make \c operator \c bool return false. + */ + Tag() : ::Tag() {} + + /** + * Destructor. + */ + ~Tag() {} + + /** + * Returns false if the tag represents an error value rather than a + * legal reader type. + * TODO: make this conversion operator "explicit" (C++11) or use a + * "safe bool" idiom (not necessary if "explicit" is available), + * otherwise this may allow nonsense/undesired comparison operations. + */ + operator bool() const { return *this != Tag(); } + + /** + * Assignment operator. + */ + Tag& operator=(const Tag& other); + + /** + * Compares two tags for equality. + */ + bool operator==(const Tag& other) const + { + return ::Tag::operator==(other); + } + + /** + * Compares two tags for inequality. 
+ */ + bool operator!=(const Tag& other) const + { + return ::Tag::operator!=(other); + } + + /** + * Compares two tags for less-than relationship. + */ + bool operator<(const Tag& other) const + { + return ::Tag::operator<(other); + } + + /** + * Returns the \c Input::Reader enum that corresponds to this tag. + * The returned value does not have its ref-count increased. + * + * @param etype the script-layer enum type associated with the tag. + */ + EnumVal* AsEnumVal() const; + + static Tag Error; + +protected: + friend class plugin::ComponentManager; + friend class plugin::TaggedComponent; + + /** + * Constructor. + * + * @param type The main type. Note that the \a input::Manager + * manages the value space internally, so noone else should assign + * any main types. + * + * @param subtype The sub type, which is left to an reader for + * interpretation. By default it's set to zero. + */ + Tag(type_t type, subtype_t subtype = 0); + + /** + * Constructor. + * + * @param val An enum value of script type \c Input::Reader. 
+ */ + Tag(EnumVal* val) : ::Tag(val) {} +}; + +} + +#endif diff --git a/src/input.bif b/src/input/input.bif similarity index 68% rename from src/input.bif rename to src/input/input.bif index f7c4d37a67..b28ccc00d8 100644 --- a/src/input.bif +++ b/src/input/input.bif @@ -4,9 +4,14 @@ module Input; %%{ #include "input/Manager.h" -#include "NetVar.h" %%} +enum Event %{ + EVENT_NEW = 0, + EVENT_CHANGED = 1, + EVENT_REMOVED = 2, +%} + type TableDescription: record; type EventDescription: record; type AnalysisDescription: record; @@ -45,30 +50,3 @@ function Input::__force_update%(id: string%) : bool const accept_unsupported_types: bool; -# Options for Ascii Reader - -module InputAscii; - -const separator: string; -const set_separator: string; -const empty_field: string; -const unset_field: string; - -module InputRaw; -const record_separator: string; - -module InputBenchmark; -const factor: double; -const spread: count; -const autospread: double; -const addfactor: count; -const stopspreadat: count; -const timedspread: double; - -module InputBinary; -const chunk_size: count; - -module InputSQLite; -const set_separator: string; -const unset_field: string; -const empty_field: string; diff --git a/src/input/readers/CMakeLists.txt b/src/input/readers/CMakeLists.txt new file mode 100644 index 0000000000..36b7439052 --- /dev/null +++ b/src/input/readers/CMakeLists.txt @@ -0,0 +1,6 @@ + +add_subdirectory(ascii) +add_subdirectory(benchmark) +add_subdirectory(binary) +add_subdirectory(raw) +add_subdirectory(sqlite) diff --git a/src/input/readers/Ascii.cc b/src/input/readers/ascii/Ascii.cc similarity index 99% rename from src/input/readers/Ascii.cc rename to src/input/readers/ascii/Ascii.cc index a79121e80a..1bbcaea1d9 100644 --- a/src/input/readers/Ascii.cc +++ b/src/input/readers/ascii/Ascii.cc @@ -1,18 +1,18 @@ // See the file "COPYING" in the main distribution directory for copyright. 
-#include "Ascii.h" -#include "NetVar.h" - #include #include -#include "../../threading/SerialTypes.h" - #include #include #include #include +#include "Ascii.h" +#include "ascii.bif.h" + +#include "threading/SerialTypes.h" + using namespace input::reader; using namespace threading; using threading::Value; diff --git a/src/input/readers/Ascii.h b/src/input/readers/ascii/Ascii.h similarity index 98% rename from src/input/readers/Ascii.h rename to src/input/readers/ascii/Ascii.h index 5d6bc71d54..fe9bb95845 100644 --- a/src/input/readers/Ascii.h +++ b/src/input/readers/ascii/Ascii.h @@ -6,7 +6,7 @@ #include #include -#include "../ReaderBackend.h" +#include "input/ReaderBackend.h" #include "threading/formatters/Ascii.h" namespace input { namespace reader { diff --git a/src/input/readers/ascii/CMakeLists.txt b/src/input/readers/ascii/CMakeLists.txt new file mode 100644 index 0000000000..267bb9a7ab --- /dev/null +++ b/src/input/readers/ascii/CMakeLists.txt @@ -0,0 +1,9 @@ + +include(BroPlugin) + +include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) + +bro_plugin_begin(Bro AsciiReader) +bro_plugin_cc(Ascii.cc Plugin.cc) +bro_plugin_bif(ascii.bif) +bro_plugin_end() diff --git a/src/input/readers/ascii/Plugin.cc b/src/input/readers/ascii/Plugin.cc new file mode 100644 index 0000000000..b389cb8602 --- /dev/null +++ b/src/input/readers/ascii/Plugin.cc @@ -0,0 +1,24 @@ +// See the file in the main distribution directory for copyright. 
+ +#include "plugin/Plugin.h" + +#include "Ascii.h" + +namespace plugin { +namespace Bro_AsciiReader { + +class Plugin : public plugin::Plugin { +public: + plugin::Configuration Configure() + { + AddComponent(new ::input::Component("Ascii", ::input::reader::Ascii::Instantiate)); + + plugin::Configuration config; + config.name = "Bro::AsciiReader"; + config.description = "ASCII input reader"; + return config; + } +} plugin; + +} +} diff --git a/src/input/readers/ascii/ascii.bif b/src/input/readers/ascii/ascii.bif new file mode 100644 index 0000000000..8bb3a96492 --- /dev/null +++ b/src/input/readers/ascii/ascii.bif @@ -0,0 +1,7 @@ + +module InputAscii; + +const separator: string; +const set_separator: string; +const empty_field: string; +const unset_field: string; diff --git a/src/input/readers/Benchmark.cc b/src/input/readers/benchmark/Benchmark.cc similarity index 98% rename from src/input/readers/Benchmark.cc rename to src/input/readers/benchmark/Benchmark.cc index de7eae8cc8..9d962c8c64 100644 --- a/src/input/readers/Benchmark.cc +++ b/src/input/readers/benchmark/Benchmark.cc @@ -1,16 +1,15 @@ // See the file "COPYING" in the main distribution directory for copyright. 
-#include "Benchmark.h" -#include "NetVar.h" - -#include "../../threading/SerialTypes.h" - #include #include #include #include -#include "../../threading/Manager.h" +#include "Benchmark.h" +#include "benchmark.bif.h" + +#include "threading/SerialTypes.h" +#include "threading/Manager.h" using namespace input::reader; using threading::Value; diff --git a/src/input/readers/Benchmark.h b/src/input/readers/benchmark/Benchmark.h similarity index 97% rename from src/input/readers/Benchmark.h rename to src/input/readers/benchmark/Benchmark.h index 3296f3a85e..42501c1c29 100644 --- a/src/input/readers/Benchmark.h +++ b/src/input/readers/benchmark/Benchmark.h @@ -3,7 +3,7 @@ #ifndef INPUT_READERS_BENCHMARK_H #define INPUT_READERS_BENCHMARK_H -#include "../ReaderBackend.h" +#include "input/ReaderBackend.h" #include "threading/formatters/Ascii.h" namespace input { namespace reader { diff --git a/src/input/readers/benchmark/CMakeLists.txt b/src/input/readers/benchmark/CMakeLists.txt new file mode 100644 index 0000000000..3b3a34ae47 --- /dev/null +++ b/src/input/readers/benchmark/CMakeLists.txt @@ -0,0 +1,9 @@ + +include(BroPlugin) + +include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) + +bro_plugin_begin(Bro BenchmarkReader) +bro_plugin_cc(Benchmark.cc Plugin.cc) +bro_plugin_bif(benchmark.bif) +bro_plugin_end() diff --git a/src/input/readers/benchmark/Plugin.cc b/src/input/readers/benchmark/Plugin.cc new file mode 100644 index 0000000000..d5e0975a80 --- /dev/null +++ b/src/input/readers/benchmark/Plugin.cc @@ -0,0 +1,24 @@ +// See the file in the main distribution directory for copyright. 
+ +#include "plugin/Plugin.h" + +#include "Benchmark.h" + +namespace plugin { +namespace Bro_BenchmarkReader { + +class Plugin : public plugin::Plugin { +public: + plugin::Configuration Configure() + { + AddComponent(new ::input::Component("Benchmark", ::input::reader::Benchmark::Instantiate)); + + plugin::Configuration config; + config.name = "Bro::BenchmarkReader"; + config.description = "Benchmark input reader"; + return config; + } +} plugin; + +} +} diff --git a/src/input/readers/benchmark/benchmark.bif b/src/input/readers/benchmark/benchmark.bif new file mode 100644 index 0000000000..d505f0efaf --- /dev/null +++ b/src/input/readers/benchmark/benchmark.bif @@ -0,0 +1,9 @@ + +module InputBenchmark; + +const factor: double; +const spread: count; +const autospread: double; +const addfactor: count; +const stopspreadat: count; +const timedspread: double; diff --git a/src/input/readers/Binary.cc b/src/input/readers/binary/Binary.cc similarity index 98% rename from src/input/readers/Binary.cc rename to src/input/readers/binary/Binary.cc index 96a9028f7b..560a80f9a0 100644 --- a/src/input/readers/Binary.cc +++ b/src/input/readers/binary/Binary.cc @@ -3,9 +3,9 @@ #include #include "Binary.h" -#include "NetVar.h" +#include "binary.bif.h" -#include "../../threading/SerialTypes.h" +#include "threading/SerialTypes.h" using namespace input::reader; using threading::Value; diff --git a/src/input/readers/Binary.h b/src/input/readers/binary/Binary.h similarity index 96% rename from src/input/readers/Binary.h rename to src/input/readers/binary/Binary.h index a2283d1980..587d56cfa7 100644 --- a/src/input/readers/Binary.h +++ b/src/input/readers/binary/Binary.h @@ -5,7 +5,7 @@ #include -#include "../ReaderBackend.h" +#include "input/ReaderBackend.h" namespace input { namespace reader { diff --git a/src/input/readers/binary/CMakeLists.txt b/src/input/readers/binary/CMakeLists.txt new file mode 100644 index 0000000000..800c3b7567 --- /dev/null +++ 
b/src/input/readers/binary/CMakeLists.txt @@ -0,0 +1,9 @@ + +include(BroPlugin) + +include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) + +bro_plugin_begin(Bro BinaryReader) +bro_plugin_cc(Binary.cc Plugin.cc) +bro_plugin_bif(binary.bif) +bro_plugin_end() diff --git a/src/input/readers/binary/Plugin.cc b/src/input/readers/binary/Plugin.cc new file mode 100644 index 0000000000..7c5dc16b8b --- /dev/null +++ b/src/input/readers/binary/Plugin.cc @@ -0,0 +1,24 @@ +// See the file in the main distribution directory for copyright. + +#include "plugin/Plugin.h" + +#include "Binary.h" + +namespace plugin { +namespace Bro_BinaryReader { + +class Plugin : public plugin::Plugin { +public: + plugin::Configuration Configure() + { + AddComponent(new ::input::Component("Binary", ::input::reader::Binary::Instantiate)); + + plugin::Configuration config; + config.name = "Bro::BinaryReader"; + config.description = "Binary input reader"; + return config; + } +} plugin; + +} +} diff --git a/src/input/readers/binary/binary.bif b/src/input/readers/binary/binary.bif new file mode 100644 index 0000000000..54e32ff453 --- /dev/null +++ b/src/input/readers/binary/binary.bif @@ -0,0 +1,4 @@ + +module InputBinary; + +const chunk_size: count; diff --git a/src/input/readers/raw/CMakeLists.txt b/src/input/readers/raw/CMakeLists.txt new file mode 100644 index 0000000000..5540d70202 --- /dev/null +++ b/src/input/readers/raw/CMakeLists.txt @@ -0,0 +1,9 @@ + +include(BroPlugin) + +include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) + +bro_plugin_begin(Bro RawReader) +bro_plugin_cc(Raw.cc Plugin.cc) +bro_plugin_bif(raw.bif) +bro_plugin_end() diff --git a/src/input/readers/raw/Plugin.cc b/src/input/readers/raw/Plugin.cc new file mode 100644 index 0000000000..c7af84e34e --- /dev/null +++ b/src/input/readers/raw/Plugin.cc @@ -0,0 +1,43 @@ +// See the file in the main distribution directory for copyright. 
+ +#include "Plugin.h" + +namespace plugin { namespace Bro_RawReader { Plugin plugin; } } + +using namespace plugin::Bro_RawReader; + +Plugin::Plugin() + { + init = false; + } + +plugin::Configuration Plugin::Configure() + { + AddComponent(new ::input::Component("Raw", ::input::reader::Raw::Instantiate)); + + plugin::Configuration config; + config.name = "Bro::RawReader"; + config.description = "Raw input reader"; + return config; + } + +void Plugin::InitPreScript() + { + if ( pthread_mutex_init(&fork_mutex, 0) != 0 ) + reporter->FatalError("cannot initialize raw reader's mutex"); + + init = true; + } + +void Plugin::Done() + { + pthread_mutex_destroy(&fork_mutex); + init = false; + } + +pthread_mutex_t* Plugin::ForkMutex() + { + assert(init); + return &fork_mutex; + } + diff --git a/src/input/readers/raw/Plugin.h b/src/input/readers/raw/Plugin.h new file mode 100644 index 0000000000..59a5dfd2be --- /dev/null +++ b/src/input/readers/raw/Plugin.h @@ -0,0 +1,30 @@ +// See the file in the main distribution directory for copyright. + +#include "plugin/Plugin.h" + +#include "Raw.h" + +namespace plugin { +namespace Bro_RawReader { + +class Plugin : public plugin::Plugin { +public: + Plugin(); + + plugin::Configuration Configure(); + + virtual void InitPreScript(); + virtual void Done(); + + pthread_mutex_t * ForkMutex(); + +private: + bool init; + pthread_mutex_t fork_mutex; + +}; + +extern Plugin plugin; + +} +} diff --git a/src/input/readers/Raw.cc b/src/input/readers/raw/Raw.cc similarity index 98% rename from src/input/readers/Raw.cc rename to src/input/readers/raw/Raw.cc index 11976e2a11..259792cb3f 100644 --- a/src/input/readers/Raw.cc +++ b/src/input/readers/raw/Raw.cc @@ -1,10 +1,5 @@ // See the file "COPYING" in the main distribution directory for copyright. 
-#include "Raw.h" -#include "NetVar.h" - -#include "../../threading/SerialTypes.h" - #include #include #include @@ -14,6 +9,12 @@ #include #include +#include "Raw.h" +#include "Plugin.h" +#include "raw.bif.h" + +#include "threading/SerialTypes.h" + extern "C" { #include "setsignal.h" } @@ -23,12 +24,6 @@ using threading::Value; using threading::Field; const int Raw::block_size = 4096; // how big do we expect our chunks of data to be. -pthread_mutex_t Raw::fork_mutex; - -bool Raw::ClassInit() - { - return pthread_mutex_init(&fork_mutex, 0) == 0; - } Raw::Raw(ReaderFrontend *frontend) : ReaderBackend(frontend) { @@ -109,7 +104,7 @@ bool Raw::SetFDFlags(int fd, int cmd, int flags) bool Raw::LockForkMutex() { - int res = pthread_mutex_lock(&fork_mutex); + int res = pthread_mutex_lock(plugin::Bro_RawReader::plugin.ForkMutex()); if ( res == 0 ) return true; @@ -119,7 +114,7 @@ bool Raw::LockForkMutex() bool Raw::UnlockForkMutex() { - int res = pthread_mutex_unlock(&fork_mutex); + int res = pthread_mutex_unlock(plugin::Bro_RawReader::plugin.ForkMutex()); if ( res == 0 ) return true; diff --git a/src/input/readers/Raw.h b/src/input/readers/raw/Raw.h similarity index 96% rename from src/input/readers/Raw.h rename to src/input/readers/raw/Raw.h index c549125174..06568a6296 100644 --- a/src/input/readers/Raw.h +++ b/src/input/readers/raw/Raw.h @@ -6,7 +6,7 @@ #include #include -#include "../ReaderBackend.h" +#include "input/ReaderBackend.h" namespace input { namespace reader { @@ -21,8 +21,6 @@ public: static ReaderBackend* Instantiate(ReaderFrontend* frontend) { return new Raw(frontend); } - static bool ClassInit(); - protected: virtual bool DoInit(const ReaderInfo& info, int arg_num_fields, const threading::Field* const* fields); virtual void DoClose(); diff --git a/src/input/readers/raw/raw.bif b/src/input/readers/raw/raw.bif new file mode 100644 index 0000000000..becaf47f79 --- /dev/null +++ b/src/input/readers/raw/raw.bif @@ -0,0 +1,4 @@ + +module InputRaw; + +const 
record_separator: string; diff --git a/src/input/readers/sqlite/CMakeLists.txt b/src/input/readers/sqlite/CMakeLists.txt new file mode 100644 index 0000000000..3c513127dc --- /dev/null +++ b/src/input/readers/sqlite/CMakeLists.txt @@ -0,0 +1,9 @@ + +include(BroPlugin) + +include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) + +bro_plugin_begin(Bro SQLiteReader) +bro_plugin_cc(SQLite.cc Plugin.cc) +bro_plugin_bif(sqlite.bif) +bro_plugin_end() diff --git a/src/input/readers/sqlite/Plugin.cc b/src/input/readers/sqlite/Plugin.cc new file mode 100644 index 0000000000..db75d6dc22 --- /dev/null +++ b/src/input/readers/sqlite/Plugin.cc @@ -0,0 +1,24 @@ +// See the file in the main distribution directory for copyright. + +#include "plugin/Plugin.h" + +#include "SQLite.h" + +namespace plugin { +namespace Bro_SQLiteReader { + +class Plugin : public plugin::Plugin { +public: + plugin::Configuration Configure() + { + AddComponent(new ::input::Component("SQLite", ::input::reader::SQLite::Instantiate)); + + plugin::Configuration config; + config.name = "Bro::SQLiteReader"; + config.description = "SQLite input reader"; + return config; + } +} plugin; + +} +} diff --git a/src/input/readers/SQLite.cc b/src/input/readers/sqlite/SQLite.cc similarity index 97% rename from src/input/readers/SQLite.cc rename to src/input/readers/sqlite/SQLite.cc index d032f934a7..3790e5919d 100644 --- a/src/input/readers/SQLite.cc +++ b/src/input/readers/sqlite/SQLite.cc @@ -2,16 +2,18 @@ #include "config.h" -#include "SQLite.h" -#include "NetVar.h" - #include #include #include #include #include -#include "../../threading/SerialTypes.h" +#include "SQLite.h" +#include "sqlite.bif.h" +#include "logging/writers/sqlite/sqlite.bif.h" +#include "logging/writers/ascii/ascii.bif.h" + +#include "threading/SerialTypes.h" using namespace input::reader; using threading::Value; diff --git a/src/input/readers/SQLite.h b/src/input/readers/sqlite/SQLite.h similarity index 97% rename from 
src/input/readers/SQLite.h rename to src/input/readers/sqlite/SQLite.h index a98b3e06b8..f4cae7d01f 100644 --- a/src/input/readers/SQLite.h +++ b/src/input/readers/sqlite/SQLite.h @@ -8,8 +8,7 @@ #include #include -#include "../ReaderBackend.h" - +#include "input/ReaderBackend.h" #include "threading/formatters/Ascii.h" #include "3rdparty/sqlite3.h" diff --git a/src/input/readers/sqlite/sqlite.bif b/src/input/readers/sqlite/sqlite.bif new file mode 100644 index 0000000000..60ea4e3051 --- /dev/null +++ b/src/input/readers/sqlite/sqlite.bif @@ -0,0 +1,6 @@ + +module InputSQLite; + +const set_separator: string; +const unset_field: string; +const empty_field: string; diff --git a/src/BPF_Program.cc b/src/iosource/BPF_Program.cc similarity index 74% rename from src/BPF_Program.cc rename to src/iosource/BPF_Program.cc index 5260429eb0..70469c97e7 100644 --- a/src/BPF_Program.cc +++ b/src/iosource/BPF_Program.cc @@ -58,7 +58,14 @@ int pcap_compile_nopcap(int snaplen_arg, int linktype_arg, } #endif -BPF_Program::BPF_Program() : m_compiled(), m_program() +// Simple heuristic to identify filters that always match, so that we can +// skip the filtering in that case. "ip or not ip" is Bro's default filter. +static bool filter_matches_anything(const char *filter) + { + return (! 
filter) || strlen(filter) == 0 || strcmp(filter, "ip or not ip") == 0; + } + +BPF_Program::BPF_Program() : m_compiled(), m_matches_anything(false), m_program() { } @@ -86,12 +93,14 @@ bool BPF_Program::Compile(pcap_t* pcap, const char* filter, uint32 netmask, } m_compiled = true; + m_matches_anything = filter_matches_anything(filter); return true; } bool BPF_Program::Compile(int snaplen, int linktype, const char* filter, - uint32 netmask, char* errbuf, bool optimize) + uint32 netmask, char* errbuf, unsigned int errbuf_len, + bool optimize) { FreeCode(); @@ -99,15 +108,23 @@ bool BPF_Program::Compile(int snaplen, int linktype, const char* filter, char my_error[PCAP_ERRBUF_SIZE]; int err = pcap_compile_nopcap(snaplen, linktype, &m_program, - (char *) filter, optimize, netmask, error); + (char *) filter, optimize, netmask, my_error); if ( err < 0 && errbuf ) - safe_strncpy(errbuf, my_errbuf, PCAP_ERRBUF_SIZE); + safe_strncpy(errbuf, my_error, errbuf_len); + *errbuf = '\0'; #else int err = pcap_compile_nopcap(snaplen, linktype, &m_program, (char*) filter, optimize, netmask); + + if ( err < 0 && errbuf && errbuf_len ) + *errbuf = '\0'; #endif + if ( err == 0 ) + { m_compiled = true; + m_matches_anything = filter_matches_anything(filter); + } return err == 0; } diff --git a/src/BPF_Program.h b/src/iosource/BPF_Program.h similarity index 82% rename from src/BPF_Program.h rename to src/iosource/BPF_Program.h index 88ed669da2..88a4512d4e 100644 --- a/src/BPF_Program.h +++ b/src/iosource/BPF_Program.h @@ -30,12 +30,17 @@ public: // similarly to pcap_compile_nopcap(). Parameters are // similar. Returns true on success. bool Compile(int snaplen, int linktype, const char* filter, - uint32 netmask, char* errbuf = 0, bool optimize = true); + uint32 netmask, char* errbuf = 0, unsigned int errbuf_len = 0, + bool optimize = true); // Returns true if this program currently contains compiled // code, false otherwise. 
bool IsCompiled() { return m_compiled; } + // Returns true if this program matches any packets. This is not + // comprehensive, but can identify a few cases where it does. + bool MatchesAnything() { return m_matches_anything; } + // Accessor to the compiled program. Returns nil when // no program is currently compiled. bpf_program* GetProgram(); @@ -46,6 +51,7 @@ protected: // (I like to prefix member variables with m_, makes it clear // in the implementation whether it's a global or not. --ck) bool m_compiled; + bool m_matches_anything; struct bpf_program m_program; }; diff --git a/src/iosource/CMakeLists.txt b/src/iosource/CMakeLists.txt new file mode 100644 index 0000000000..a36667aee7 --- /dev/null +++ b/src/iosource/CMakeLists.txt @@ -0,0 +1,23 @@ + +include(BroSubdir) + +include_directories(BEFORE + ${CMAKE_CURRENT_SOURCE_DIR} + ${CMAKE_CURRENT_BINARY_DIR} +) + +add_subdirectory(pcap) + +set(iosource_SRCS + BPF_Program.cc + Component.cc + Manager.cc + PktDumper.cc + PktSrc.cc +) + +bif_target(pcap.bif) + +bro_add_subdir_library(iosource ${iosource_SRCS}) +add_dependencies(bro_iosource generate_outputs) + diff --git a/src/iosource/Component.cc b/src/iosource/Component.cc new file mode 100644 index 0000000000..a285cd8552 --- /dev/null +++ b/src/iosource/Component.cc @@ -0,0 +1,165 @@ +// See the file "COPYING" in the main distribution directory for copyright. 
+ +#include "Component.h" + +#include "Desc.h" +#include "Reporter.h" + +using namespace iosource; + +Component::Component(const std::string& name) + : plugin::Component(plugin::component::IOSOURCE, name) + { + } + +Component::Component(plugin::component::Type type, const std::string& name) + : plugin::Component(type, name) + { + } + +Component::~Component() + { + } + +PktSrcComponent::PktSrcComponent(const std::string& arg_name, const std::string& arg_prefix, InputType arg_type, factory_callback arg_factory) + : iosource::Component(plugin::component::PKTSRC, arg_name) + { + tokenize_string(arg_prefix, ":", &prefixes); + type = arg_type; + factory = arg_factory; + } + +PktSrcComponent::~PktSrcComponent() + { + } + +const std::vector& PktSrcComponent::Prefixes() const + { + return prefixes; + } + +bool PktSrcComponent::HandlesPrefix(const string& prefix) const + { + for ( std::vector::const_iterator i = prefixes.begin(); + i != prefixes.end(); i++ ) + { + if ( *i == prefix ) + return true; + } + + return false; + } + +bool PktSrcComponent::DoesLive() const + { + return type == LIVE || type == BOTH; + } + +bool PktSrcComponent::DoesTrace() const + { + return type == TRACE || type == BOTH; + } + +PktSrcComponent::factory_callback PktSrcComponent::Factory() const + { + return factory; + } + +void PktSrcComponent::DoDescribe(ODesc* d) const + { + iosource::Component::DoDescribe(d); + + string prefs; + + for ( std::vector::const_iterator i = prefixes.begin(); + i != prefixes.end(); i++ ) + { + if ( prefs.size() ) + prefs += ", "; + + prefs += '"' + *i + '"'; + } + + d->Add("interface prefix"); + if ( prefixes.size() > 1 ) + d->Add("es"); + + d->Add(" "); + d->Add(prefs); + d->Add("; supports "); + + switch ( type ) { + case LIVE: + d->Add("live input"); + break; + + case TRACE: + d->Add("trace input"); + break; + + case BOTH: + d->Add("live and trace input"); + break; + + default: + reporter->InternalError("unknown PkrSrc type"); + } + + } + 
+PktDumperComponent::PktDumperComponent(const std::string& name, const std::string& arg_prefix, factory_callback arg_factory) + : plugin::Component(plugin::component::PKTDUMPER, name) + { + tokenize_string(arg_prefix, ":", &prefixes); + factory = arg_factory; + } + +PktDumperComponent::~PktDumperComponent() + { + } + +PktDumperComponent::factory_callback PktDumperComponent::Factory() const + { + return factory; + } + +const std::vector& PktDumperComponent::Prefixes() const + { + return prefixes; + } + +bool PktDumperComponent::HandlesPrefix(const string& prefix) const + { + for ( std::vector::const_iterator i = prefixes.begin(); + i != prefixes.end(); i++ ) + { + if ( *i == prefix ) + return true; + } + + return false; + } + +void PktDumperComponent::DoDescribe(ODesc* d) const + { + plugin::Component::DoDescribe(d); + + string prefs; + + for ( std::vector::const_iterator i = prefixes.begin(); + i != prefixes.end(); i++ ) + { + if ( prefs.size() ) + prefs += ", "; + + prefs += '"' + *i + '"'; + } + + d->Add("dumper prefix"); + + if ( prefixes.size() > 1 ) + d->Add("es"); + + d->Add(": "); + d->Add(prefs); + } diff --git a/src/iosource/Component.h b/src/iosource/Component.h new file mode 100644 index 0000000000..4a38a9cd22 --- /dev/null +++ b/src/iosource/Component.h @@ -0,0 +1,177 @@ +// See the file "COPYING" in the main distribution directory for copyright. + +#ifndef IOSOURCE_PLUGIN_COMPONENT_H +#define IOSOURCE_PLUGIN_COMPONENT_H + +#include +#include + +#include "plugin/Component.h" + +namespace iosource { + +class IOSource; +class PktSrc; +class PktDumper; + +/** + * Component description for plugins providing IOSources. + */ +class Component : public plugin::Component { +public: + typedef IOSource* (*factory_callback)(); + + /** + * Constructor. + * + * @param name A descriptive name for the component. This name must + * be unique across all components of this type. + */ + Component(const std::string& name); + + /** + * Copy constructor. 
+ */ + Component(const Component& other); + + /** + * Destructor. + */ + ~Component(); + +protected: + /** + * Constructor to use by derived classes. + * + * @param type The type of the componnent. + * + * @param name A descriptive name for the component. This name must + * be unique across all components of this type. + */ + Component(plugin::component::Type type, const std::string& name); +}; + +/** + * Component description for plugins providing a PktSrc for packet input. + */ +class PktSrcComponent : public iosource::Component { +public: + /** + * Type of input a packet source supports. + */ + enum InputType { + LIVE, ///< Live input. + TRACE, ///< Offline input from trace file. + BOTH ///< Live input as well as offline. + }; + + typedef PktSrc* (*factory_callback)(const std::string& path, bool is_live); + + /** + * Constructor. + * + * @param name A descriptive name for the component. This name must + * be unique across all components of this type. + * + * @param prefixes The list of interface/file prefixes associated + * with this component. + * + * @param type Type of input the component supports. + * + * @param factor Factory function to instantiate component. + */ + PktSrcComponent(const std::string& name, const std::string& prefixes, InputType type, factory_callback factory); + + /** + * Destructor. + */ + virtual ~PktSrcComponent(); + + /** + * Returns the prefix(es) passed to the constructor. + */ + const std::vector& Prefixes() const; + + /** + * Returns true if the given prefix is among the one specified for the component. + */ + bool HandlesPrefix(const std::string& prefix) const; + + /** + * Returns true if packet source instantiated by the component handle + * live traffic. + */ + bool DoesLive() const; + + /** + * Returns true if packet source instantiated by the component handle + * offline traces. + */ + bool DoesTrace() const; + + /** + * Returns the source's factory function. 
+ */ + factory_callback Factory() const; + + /** + * Generates a human-readable description of the component. This goes + * into the output of \c "bro -NN". + */ + virtual void DoDescribe(ODesc* d) const; + +private: + std::vector prefixes; + InputType type; + factory_callback factory; +}; + +/** + * Component description for plugins providing a PktDumper for packet output. + * + * PktDumpers aren't IOSurces but we locate them here to keep them along with + * the PktSrc. + */ +class PktDumperComponent : public plugin::Component { +public: + typedef PktDumper* (*factory_callback)(const std::string& path, bool append); + + /** + * XXX + */ + PktDumperComponent(const std::string& name, const std::string& prefixes, factory_callback factory); + + /** + * Destructor. + */ + ~PktDumperComponent(); + + /** + * Returns the prefix(es) passed to the constructor. + */ + const std::vector& Prefixes() const; + + /** + * Returns true if the given prefix is among the one specified for the component. + */ + bool HandlesPrefix(const std::string& prefix) const; + + /** + * Returns the source's factory function. + */ + factory_callback Factory() const; + + /** + * Generates a human-readable description of the component. This goes + * into the output of \c "bro -NN". + */ + virtual void DoDescribe(ODesc* d) const; + +private: + std::vector prefixes; + factory_callback factory; +}; + +} + +#endif diff --git a/src/iosource/FD_Set.h b/src/iosource/FD_Set.h new file mode 100644 index 0000000000..61e3e7a59b --- /dev/null +++ b/src/iosource/FD_Set.h @@ -0,0 +1,104 @@ +#ifndef BRO_FD_SET_H +#define BRO_FD_SET_H + +#include +#include + +namespace iosource { + +/** + * A container holding a set of file descriptors. + */ +class FD_Set { +public: + + /** + * Constructor. The set is initially empty. + */ + FD_Set() : max(-1), fds() + { } + + /** + * Insert a file descriptor in to the set. + * @param fd the fd to insert in the set. + * @return false if fd was already in the set, else true. 
+ */ + bool Insert(int fd) + { + if ( max < fd ) + max = fd; + + return fds.insert(fd).second; + } + + /** + * Inserts all the file descriptors from another set in to this one. + * @param other a file descriptor set to merge in to this one. + */ + void Insert(const FD_Set& other) + { + for ( std::set::const_iterator it = other.fds.begin(); + it != other.fds.end(); ++it ) + Insert(*it); + } + + /** + * Empties the set. + */ + void Clear() + { max = -1; fds.clear(); } + + /** + * Insert file descriptors in to a fd_set for use with select(). + * @return the greatest file descriptor inserted. + */ + int Set(fd_set* set) const + { + for ( std::set::const_iterator it = fds.begin(); it != fds.end(); + ++it ) + FD_SET(*it, set); + + return max; + } + + /** + * @return Whether a file descriptor belonging to this set is within the + * fd_set arugment. + */ + bool Ready(fd_set* set) const + { + for ( std::set::const_iterator it = fds.begin(); it != fds.end(); + ++it ) + { + if ( FD_ISSET(*it, set) ) + return true; + } + + return false; + } + + /** + * @return whether any file descriptors have been added to the set. + */ + bool Empty() const + { + return fds.empty(); + } + + /** + * @return the greatest file descriptor of all that have been added to the + * set, or -1 if the set is empty. + */ + int Max() const + { + return max; + } + +private: + int max; + std::set fds; +}; + +} // namespace bro + +#endif // BRO_FD_SET_H diff --git a/src/iosource/IOSource.h b/src/iosource/IOSource.h new file mode 100644 index 0000000000..df82012268 --- /dev/null +++ b/src/iosource/IOSource.h @@ -0,0 +1,140 @@ +// See the file "COPYING" in the main distribution directory for copyright. + +#ifndef IOSOURCE_IOSOURCE_H +#define IOSOURCE_IOSOURCE_H + +extern "C" { +#include +} + +#include +#include "FD_Set.h" +#include "Timer.h" + +namespace iosource { + +/** + * Interface class for components providing/consuming data inside Bro's main + * loop. 
+ */ +class IOSource { +public: + /** + * Constructor. + */ + IOSource() { idle = false; closed = false; } + + /** + * Destructor. + */ + virtual ~IOSource() {} + + /** + * Returns true if source has nothing ready to process. + */ + bool IsIdle() const { return idle; } + + /** + * Returns true if more data is to be expected in the future. + * Otherwise, source may be removed. + */ + bool IsOpen() const { return ! closed; } + + /** + * Initializes the source. Can be overwritten by derived classes. + */ + virtual void Init() { } + + /** + * Finalizes the source when it's being closed. Can be overwritten by + * derived classes. + */ + virtual void Done() { } + + /** + * Returns select'able file descriptors for this source. Leaves the + * passed values untouched if not available. + * + * @param read Pointer to container where to insert a read descriptor. + * + * @param write Pointer to container where to insert a write descriptor. + * + * @param except Pointer to container where to insert a except descriptor. + */ + virtual void GetFds(FD_Set* read, FD_Set* write, FD_Set* except) = 0; + + /** + * Returns the timestamp (in \a global network time) associated with + * next data item from this source. If the source wants the data + * item to be processed with a local network time, it sets the + * argument accordingly. + * + * This method will be called only when either IsIdle() returns + * false, or select() on one of the fds returned by GetFDs() + * indicates that there's data to process. + * + * Must be overridden by derived classes. + * + * @param network_time A pointer to store the \a local network time + * associated with the next item (as opposed to global network time). + * + * @return The global network time of the next entry, or a value + * smaller than zero if none is available currently. + */ + virtual double NextTimestamp(double* network_time) = 0; + + /** + * Processes and consumes next data item. 
+ * + * This method will be called only when either IsIdle() returns + * false, or select() on one of the fds returned by GetFDs() + * indicates that there's data to process. + * + * Must be overridden by derived classes. + */ + virtual void Process() = 0; + + /** + * Returns the tag of the timer manafger associated with the last + * procesees data item. + * + * Can be overridden by derived classes. + * + * @return The tag, or null for the global timer manager. + * + */ + virtual TimerMgr::Tag* GetCurrentTag() { return 0; } + + /** + * Returns a descriptual tag representing the source for debugging. + * + * Can be overridden by derived classes. + * + * @return The debugging name. + */ + virtual const char* Tag() = 0; + +protected: + /* + * Callback for derived classes to call when they have gone dry + * temporarily. + * + * @param is_idle True if the source is idle currently. + */ + void SetIdle(bool is_idle) { idle = is_idle; } + + /* + * Callback for derived class to call when they have shutdown. + * + * @param is_closed True if the source is now closed. + */ + void SetClosed(bool is_closed) { closed = is_closed; } + +private: + bool idle; + bool closed; +}; + +} + +#endif diff --git a/src/iosource/Manager.cc b/src/iosource/Manager.cc new file mode 100644 index 0000000000..f71807dcbe --- /dev/null +++ b/src/iosource/Manager.cc @@ -0,0 +1,314 @@ +// See the file "COPYING" in the main distribution directory for copyright. 
+ +#include +#include +#include +#include + +#include + +#include "Manager.h" +#include "IOSource.h" +#include "PktSrc.h" +#include "PktDumper.h" +#include "plugin/Manager.h" + +#include "util.h" + +#define DEFAULT_PREFIX "pcap" + +using namespace iosource; + +Manager::~Manager() + { + for ( SourceList::iterator i = sources.begin(); i != sources.end(); ++i ) + { + (*i)->src->Done(); + delete *i; + } + + sources.clear(); + + for ( PktDumperList::iterator i = pkt_dumpers.begin(); i != pkt_dumpers.end(); ++i ) + { + (*i)->Done(); + delete *i; + } + + pkt_dumpers.clear(); + } + +void Manager::RemoveAll() + { + // We're cheating a bit here ... + dont_counts = sources.size(); + } + +IOSource* Manager::FindSoonest(double* ts) + { + // Remove sources which have gone dry. For simplicity, we only + // remove at most one each time. + for ( SourceList::iterator i = sources.begin(); + i != sources.end(); ++i ) + if ( ! (*i)->src->IsOpen() ) + { + (*i)->src->Done(); + delete *i; + sources.erase(i); + break; + } + + // Ideally, we would always call select on the fds to see which + // are ready, and return the soonest. Unfortunately, that'd mean + // one select-call per packet, which we can't afford in high-volume + // environments. Thus, we call select only every SELECT_FREQUENCY + // call (or if all sources report that they are dry). + + ++call_count; + + IOSource* soonest_src = 0; + double soonest_ts = 1e20; + double soonest_local_network_time = 1e20; + bool all_idle = true; + + // Find soonest source of those which tell us they have something to + // process. + for ( SourceList::iterator i = sources.begin(); i != sources.end(); ++i ) + { + if ( ! (*i)->src->IsIdle() ) + { + all_idle = false; + double local_network_time = 0; + double ts = (*i)->src->NextTimestamp(&local_network_time); + if ( ts > 0 && ts < soonest_ts ) + { + soonest_ts = ts; + soonest_src = (*i)->src; + soonest_local_network_time = + local_network_time ? 
+ local_network_time : ts; + } + } + } + + // If we found one and aren't going to select this time, + // return it. + int maxx = 0; + + if ( soonest_src && (call_count % SELECT_FREQUENCY) != 0 ) + goto finished; + + // Select on the join of all file descriptors. + fd_set fd_read, fd_write, fd_except; + + FD_ZERO(&fd_read); + FD_ZERO(&fd_write); + FD_ZERO(&fd_except); + + for ( SourceList::iterator i = sources.begin(); + i != sources.end(); ++i ) + { + Source* src = (*i); + + if ( ! src->src->IsIdle() ) + // No need to select on sources which we know to + // be ready. + continue; + + src->Clear(); + src->src->GetFds(&src->fd_read, &src->fd_write, &src->fd_except); + if ( src->fd_read.Empty() ) src->fd_read.Insert(0); + if ( src->fd_write.Empty() ) src->fd_write.Insert(0); + if ( src->fd_except.Empty() ) src->fd_except.Insert(0); + src->SetFds(&fd_read, &fd_write, &fd_except, &maxx); + } + + // We can't block indefinitely even when all sources are dry: + // we're doing some IOSource-independent stuff in the main loop, + // so we need to return from time to time. (Instead of no time-out + // at all, we use a very small one. This lets FreeBSD trigger a + // BPF buffer switch on the next read when the hold buffer is empty + // while the store buffer isn't filled yet. + + struct timeval timeout; + + if ( all_idle ) + { + // Interesting: when all sources are dry, simply sleeping a + // bit *without* watching for any fd becoming ready may + // decrease CPU load. I guess that's because it allows + // the kernel's packet buffers to fill. - Robin + timeout.tv_sec = 0; + timeout.tv_usec = 20; // SELECT_TIMEOUT; + select(0, 0, 0, 0, &timeout); + } + + if ( ! maxx ) + // No selectable fd at all. + goto finished; + + timeout.tv_sec = 0; + timeout.tv_usec = 0; + + if ( select(maxx + 1, &fd_read, &fd_write, &fd_except, &timeout) > 0 ) + { // Find soonest. + for ( SourceList::iterator i = sources.begin(); + i != sources.end(); ++i ) + { + Source* src = (*i); + + if ( ! 
src->src->IsIdle() ) + continue; + + if ( src->Ready(&fd_read, &fd_write, &fd_except) ) + { + double local_network_time = 0; + double ts = src->src->NextTimestamp(&local_network_time); + if ( ts > 0.0 && ts < soonest_ts ) + { + soonest_ts = ts; + soonest_src = src->src; + soonest_local_network_time = + local_network_time ? + local_network_time : ts; + } + } + } + } + +finished: + *ts = soonest_local_network_time; + return soonest_src; + } + +void Manager::Register(IOSource* src, bool dont_count) + { + src->Init(); + Source* s = new Source; + s->src = src; + if ( dont_count ) + ++dont_counts; + + sources.push_back(s); + } + +void Manager::Register(PktSrc* src) + { + pkt_srcs.push_back(src); + Register(src, false); + } + +static std::pair split_prefix(std::string path) + { + // See if the path comes with a prefix telling us which type of + // PktSrc to use. If not, choose default. + std::string prefix; + + std::string::size_type i = path.find("::"); + if ( i != std::string::npos ) + { + prefix = path.substr(0, i); + path = path.substr(i + 2, std::string::npos); + } + + else + prefix= DEFAULT_PREFIX; + + return std::make_pair(prefix, path); + } + +PktSrc* Manager::OpenPktSrc(const std::string& path, bool is_live) + { + std::pair t = split_prefix(path); + std::string prefix = t.first; + std::string npath = t.second; + + // Find the component providing packet sources of the requested prefix. + + PktSrcComponent* component = 0; + + std::list all_components = plugin_mgr->Components(); + + for ( std::list::const_iterator i = all_components.begin(); + i != all_components.end(); i++ ) + { + PktSrcComponent* c = *i; + + if ( c->HandlesPrefix(prefix) && + (( is_live && c->DoesLive() ) || + (! is_live && c->DoesTrace())) ) + { + component = c; + break; + } + } + + + if ( ! component ) + reporter->FatalError("type of packet source '%s' not recognized, or mode not supported", prefix.c_str()); + + // Instantiate packet source. 
+ + PktSrc* ps = (*component->Factory())(npath, is_live); + assert(ps); + + if ( ! ps->IsOpen() && ps->IsError() ) + // Set an error message if it didn't open successfully. + ps->Error("could not open"); + + DBG_LOG(DBG_PKTIO, "Created packet source of type %s for %s", component->Name().c_str(), npath.c_str()); + + Register(ps); + return ps; + } + + +PktDumper* Manager::OpenPktDumper(const string& path, bool append) + { + std::pair t = split_prefix(path); + std::string prefix = t.first; + std::string npath = t.second; + + // Find the component providing packet dumpers of the requested prefix. + + PktDumperComponent* component = 0; + + std::list all_components = plugin_mgr->Components(); + + for ( std::list::const_iterator i = all_components.begin(); + i != all_components.end(); i++ ) + { + if ( (*i)->HandlesPrefix(prefix) ) + { + component = (*i); + break; + } + } + + if ( ! component ) + reporter->FatalError("type of packet dumper '%s' not recognized", prefix.c_str()); + + // Instantiate packet dumper. + + PktDumper* pd = (*component->Factory())(npath, append); + assert(pd); + + if ( ! pd->IsOpen() && pd->IsError() ) + // Set an error message if it didn't open successfully. + pd->Error("could not open"); + + DBG_LOG(DBG_PKTIO, "Created packer dumper of type %s for %s", component->Name().c_str(), npath.c_str()); + + pd->Init(); + pkt_dumpers.push_back(pd); + + return pd; + } + +void Manager::Source::SetFds(fd_set* read, fd_set* write, fd_set* except, + int* maxx) const + { + *maxx = std::max(*maxx, fd_read.Set(read)); + *maxx = std::max(*maxx, fd_write.Set(write)); + *maxx = std::max(*maxx, fd_except.Set(except)); + } diff --git a/src/iosource/Manager.h b/src/iosource/Manager.h new file mode 100644 index 0000000000..fb4f6676b6 --- /dev/null +++ b/src/iosource/Manager.h @@ -0,0 +1,146 @@ +// See the file "COPYING" in the main distribution directory for copyright. 
+ +#ifndef IOSOURCE_MANAGER_H +#define IOSOURCE_MANAGER_H + +#include +#include +#include "iosource/FD_Set.h" + +namespace iosource { + +class IOSource; +class PktSrc; +class PktDumper; + +/** + * Singleton class managing all IOSources. + */ +class Manager { +public: + /** + * Constructor. + */ + Manager() { call_count = 0; dont_counts = 0; } + + /** + * Destructor. + */ + ~Manager(); + + /** + * Registers an IOSource with the manager. + * + * @param src The source. The manager takes ownership. + * + * @param dont_count If true, this source does not contribute to the + * number of IOSources returned by Size(). The effect is that if all + * sources except for the non-counting ones have gone dry, processing + * will shut down. + */ + void Register(IOSource* src, bool dont_count = false); + + /** + * Returns the packet source with the soonest available input. This + * may block for a little while if all are dry. + * + * @param ts A pointer where to store the timestamp of the input that + * the soonest source has available next. + * + * @return The source, or null if no source has input. + */ + IOSource* FindSoonest(double* ts); + + /** + * Returns the number of registered and still active sources, + * excluding those that are registered as \a dont_cont. + */ + int Size() const { return sources.size() - dont_counts; } + + typedef std::list PktSrcList; + + /** + * Returns a list of all registered PktSrc instances. This is a + * subset of all registered IOSource instances. + */ + const PktSrcList& GetPktSrcs() const { return pkt_srcs; } + + /** + * Terminate all processing immediately by removing all sources (and + * therefore now returning a Size() of zero). + */ + void Terminate() { RemoveAll(); } + + /** + * Opens a new packet source. + * + * @param path The interface or file name, as one would give to Bro \c -i. + * + * @param is_live True if \a path represents a live interface, false + * for a file. + * + * @return The new packet source, or null if an error occured. 
+ */ + PktSrc* OpenPktSrc(const std::string& path, bool is_live); + + /** + * Opens a new packet dumper. + * + * @param path The file name to dump into. + * + * @param append True to append if \a path already exists. + * + * @return The new packet dumper, or null if an error occured. + */ + PktDumper* OpenPktDumper(const std::string& path, bool append); + +private: + /** + * When looking for a source with something to process, every + * SELECT_FREQUENCY calls we will go ahead and block on a select(). + */ + static const int SELECT_FREQUENCY = 25; + + /** + * Microseconds to wait in an empty select if no source is ready. + */ + static const int SELECT_TIMEOUT = 50; + + void Register(PktSrc* src); + void RemoveAll(); + + unsigned int call_count; + int dont_counts; + + struct Source { + IOSource* src; + FD_Set fd_read; + FD_Set fd_write; + FD_Set fd_except; + + bool Ready(fd_set* read, fd_set* write, fd_set* except) const + { return fd_read.Ready(read) || fd_write.Ready(write) || + fd_except.Ready(except); } + + void SetFds(fd_set* read, fd_set* write, fd_set* except, + int* maxx) const; + + void Clear() + { fd_read.Clear(); fd_write.Clear(); fd_except.Clear(); } + }; + + typedef std::list SourceList; + SourceList sources; + + typedef std::list PktDumperList; + + PktSrcList pkt_srcs; + PktDumperList pkt_dumpers; +}; + +} + +extern iosource::Manager* iosource_mgr; + +#endif + diff --git a/src/iosource/PktDumper.cc b/src/iosource/PktDumper.cc new file mode 100644 index 0000000000..a4bc3a82f8 --- /dev/null +++ b/src/iosource/PktDumper.cc @@ -0,0 +1,84 @@ + +// See the file "COPYING" in the main distribution directory for copyright. 
+ +#include +#include + +#include "config.h" + +#include "PktDumper.h" + +using namespace iosource; + +PktDumper::PktDumper() + { + is_open = false; + errmsg = ""; + } + +PktDumper::~PktDumper() + { + } + +void PktDumper::Init() + { + Open(); + } + +void PktDumper::Done() + { + Close(); + } + +const std::string& PktDumper::Path() const + { + return props.path; + } + +bool PktDumper::IsOpen() const + { + return is_open; + } + +double PktDumper::OpenTime() const + { + return is_open ? props.open_time : 0; + } + +bool PktDumper::IsError() const + { + return errmsg.size(); + } + +const char* PktDumper::ErrorMsg() const + { + return errmsg.size() ? errmsg.c_str() : 0; + } + +int PktDumper::HdrSize() const + { + return is_open ? props.hdr_size : -1; + } + +void PktDumper::Opened(const Properties& arg_props) + { + is_open = true; + props = arg_props; + DBG_LOG(DBG_PKTIO, "Opened dumper %s", props.path.c_str()); + } + +void PktDumper::Closed() + { + is_open = false; + DBG_LOG(DBG_PKTIO, "Closed dumper %s", props.path.c_str()); + props.path = ""; + } + +void PktDumper::Error(const std::string& msg) + { + errmsg = msg; + + DBG_LOG(DBG_PKTIO, "Error with dumper %s: %s", + IsOpen() ? props.path.c_str() : "", + msg.c_str()); + } diff --git a/src/iosource/PktDumper.h b/src/iosource/PktDumper.h new file mode 100644 index 0000000000..56555c247a --- /dev/null +++ b/src/iosource/PktDumper.h @@ -0,0 +1,163 @@ +// See the file "COPYING" in the main distribution directory for copyright. + +#ifndef IOSOURCE_PKTSRC_PKTDUMPER_H +#define IOSOURCE_PKTSRC_PKTDUMPER_H + +#include "IOSource.h" + +namespace iosource { + +/** + * Base class for packet dumpers. + */ +class PktDumper { +public: + /** + * Structure describing a packet. + */ + struct Packet { + /** + * The pcap header associated with the packet. + */ + const struct pcap_pkthdr* hdr; + + /** + * The full content of the packet. + */ + const unsigned char* data; + }; + + /** + * Constructor. + */ + PktDumper(); + + /** + * Destructor. 
+ */ + virtual ~PktDumper(); + + /** + * Returns the path associated with the dumper. + */ + const std::string& Path() const; + + /** + * Returns true if the dumper is open for writing. + */ + bool IsOpen() const; + + /** + * Returns the time when the dumper was opened for writing. + */ + double OpenTime() const; + + /** + * Returns returns true if the dumper has encountered an error. + */ + bool IsError() const; + + /** + * Returns if the dumper has encountered an error, returns a + * corresponding error message. Returns an emoty string otherwise. + */ + const char* ErrorMsg() const; + + /** + * Returns the size of the link-layer headers with this dumper. + */ + int HdrSize() const; + + /** + * Writes a packet to the dumper. + * + * @param pkt The packet to record. + */ + bool Record(const Packet* pkt); + + // PktDumper interface for derived classes to implement. + + /** + * Called by the manager system to open the source. + * + * Derived classes must implement this method. If successful, the + * implementation must call \a Opened(); if not, it must call Error() + * with a corresponding message. + */ + virtual void Open() = 0; + + /** + * Called by the manager system to close the dumper. + * + * Derived classes must implement this method. If successful, the + * implementation must call \a Closed(); if not, it must call Error() + * with a corresponding message. + */ + virtual void Close() = 0; + + /** + * Called to write a packet to the dumper. + * + * Derived classes must implement this method. + * + * @param pkt The packet to record. + * + * @return True if succesful, false otherwise (in which case \a + * Error() must have been called.) + */ + virtual bool Dump(const Packet* pkt) = 0; + +protected: + friend class Manager; + + /** + * Structure to pass back information about the packet dumper to the + * base class. Derived class pass an instance of this to \a Opened(). 
+ */ + struct Properties { + std::string path; + int hdr_size; + double open_time; + }; + + /** + * Called from the implementations of \a Open() to signal that the + * source has been successully opened. + * + * @param props A properties instance describing the now open source. + */ + void Opened(const Properties& props); + + /** + * Called from the implementations of \a Close() to signal that the + * source has been closed. + */ + void Closed(); + + /** + * Called from derived classes to signal an error. + * + * @param msg A corresponding error message. + */ + void Error(const std::string& msg); + + /** + * Called by the manager to initialize the dumper. + */ + void Init(); + + /** + * Called by the manager to shutdown the dumper. + */ + void Done(); + +private: + bool is_open; + Properties props; + + std::string errmsg; +}; + +} + +#endif diff --git a/src/iosource/PktSrc.cc b/src/iosource/PktSrc.cc new file mode 100644 index 0000000000..eaf85bbfa4 --- /dev/null +++ b/src/iosource/PktSrc.cc @@ -0,0 +1,525 @@ +// See the file "COPYING" in the main distribution directory for copyright. + +#include +#include + +#include "config.h" + +#include "util.h" +#include "PktSrc.h" +#include "Hash.h" +#include "Net.h" +#include "Sessions.h" + +using namespace iosource; + +PktSrc::PktSrc() + { + have_packet = false; + errbuf = ""; + SetClosed(true); + + next_sync_point = 0; + first_timestamp = 0.0; + first_wallclock = current_wallclock = 0; + } + +PktSrc::~PktSrc() + { + BPF_Program* code; + IterCookie* cookie = filters.InitForIteration(); + while ( (code = filters.NextEntry(cookie)) ) + delete code; + } + +const std::string& PktSrc::Path() const + { + static std::string not_open("not open"); + return IsOpen() ? props.path : not_open; + } + +const char* PktSrc::ErrorMsg() const + { + return errbuf.size() ? errbuf.c_str() : 0; + } + +int PktSrc::LinkType() const + { + return IsOpen() ? props.link_type : -1; + } + +uint32 PktSrc::Netmask() const + { + return IsOpen() ? 
props.netmask : PCAP_NETMASK_UNKNOWN; + } + +bool PktSrc::IsError() const + { + return ErrorMsg(); + } + +int PktSrc::HdrSize() const + { + return IsOpen() ? props.hdr_size : -1; + } + +int PktSrc::SnapLen() const + { + return snaplen; // That's a global. Change? + } + +bool PktSrc::IsLive() const + { + return props.is_live; + } + +double PktSrc::CurrentPacketTimestamp() + { + return current_pseudo; + } + +double PktSrc::CurrentPacketWallClock() + { + // We stop time when we are suspended. + if ( net_is_processing_suspended() ) + current_wallclock = current_time(true); + + return current_wallclock; + } + +void PktSrc::Opened(const Properties& arg_props) + { + if ( arg_props.hdr_size < 0 ) + { + char buf[512]; + safe_snprintf(buf, sizeof(buf), + "unknown data link type 0x%x", props.link_type); + Error(buf); + Close(); + return; + } + + props = arg_props; + SetClosed(false); + + if ( ! PrecompileFilter(0, "") || ! SetFilter(0) ) + { + Close(); + return; + } + + if ( props.is_live ) + Info(fmt("listening on %s, capture length %d bytes\n", props.path.c_str(), SnapLen())); + + DBG_LOG(DBG_PKTIO, "Opened source %s", props.path.c_str()); + } + +void PktSrc::Closed() + { + SetClosed(true); + + DBG_LOG(DBG_PKTIO, "Closed source %s", props.path.c_str()); + } + +void PktSrc::Error(const std::string& msg) + { + // We don't report this immediately, Bro will ask us for the error + // once it notices we aren't open. + errbuf = msg; + DBG_LOG(DBG_PKTIO, "Error with source %s: %s", + IsOpen() ? 
props.path.c_str() : "", + msg.c_str()); + } + +void PktSrc::Info(const std::string& msg) + { + reporter->Info("%s", msg.c_str()); + } + +void PktSrc::Weird(const std::string& msg, const Packet* p) + { + sessions->Weird(msg.c_str(), p->hdr, p->data, 0); + } + +void PktSrc::InternalError(const std::string& msg) + { + reporter->InternalError("%s", msg.c_str()); + } + +void PktSrc::ContinueAfterSuspend() + { + current_wallclock = current_time(true); + } + +int PktSrc::GetLinkHeaderSize(int link_type) + { + switch ( link_type ) { + case DLT_NULL: + return 4; + + case DLT_EN10MB: + return 14; + + case DLT_FDDI: + return 13 + 8; // fddi_header + LLC + +#ifdef DLT_LINUX_SLL + case DLT_LINUX_SLL: + return 16; +#endif + + case DLT_PPP_SERIAL: // PPP_SERIAL + return 4; + + case DLT_RAW: + return 0; + } + + return -1; + } + +double PktSrc::CheckPseudoTime() + { + if ( ! IsOpen() ) + return 0; + + if ( ! ExtractNextPacketInternal() ) + return 0; + + if ( remote_trace_sync_interval ) + { + if ( next_sync_point == 0 || current_packet.ts >= next_sync_point ) + { + int n = remote_serializer->SendSyncPoint(); + next_sync_point = first_timestamp + + n * remote_trace_sync_interval; + remote_serializer->Log(RemoteSerializer::LogInfo, + fmt("stopping at packet %.6f, next sync-point at %.6f", + current_packet.ts, next_sync_point)); + + return 0; + } + } + + double pseudo_time = current_packet.ts - first_timestamp; + double ct = (current_time(true) - first_wallclock) * pseudo_realtime; + + return pseudo_time <= ct ? bro_start_time + pseudo_time : 0; + } + +void PktSrc::Init() + { + Open(); + } + +void PktSrc::Done() + { + if ( IsOpen() ) + Close(); + } + +void PktSrc::GetFds(iosource::FD_Set* read, iosource::FD_Set* write, + iosource::FD_Set* except) + { + if ( pseudo_realtime ) + { + // Select would give erroneous results. But we simulate it + // by setting idle accordingly. 
+ SetIdle(CheckPseudoTime() == 0); + return; + } + + if ( IsOpen() && props.selectable_fd >= 0 ) + read->Insert(props.selectable_fd); + } + +double PktSrc::NextTimestamp(double* local_network_time) + { + if ( ! IsOpen() ) + return -1.0; + + if ( ! ExtractNextPacketInternal() ) + return -1.0; + + if ( pseudo_realtime ) + { + // Delay packet if necessary. + double packet_time = CheckPseudoTime(); + if ( packet_time ) + return packet_time; + + SetIdle(true); + return -1.0; + } + + return current_packet.ts; + } + +void PktSrc::Process() + { + if ( ! IsOpen() ) + return; + + if ( ! ExtractNextPacketInternal() ) + return; + + int pkt_hdr_size = props.hdr_size; + + // Unfortunately some packets on the link might have MPLS labels + // while others don't. That means we need to ask the link-layer if + // labels are in place. + bool have_mpls = false; + + int protocol = 0; + const u_char* data = current_packet.data; + + switch ( props.link_type ) { + case DLT_NULL: + { + protocol = (data[3] << 24) + (data[2] << 16) + (data[1] << 8) + data[0]; + + // From the Wireshark Wiki: "AF_INET6, unfortunately, has + // different values in {NetBSD,OpenBSD,BSD/OS}, + // {FreeBSD,DragonFlyBSD}, and {Darwin/Mac OS X}, so an IPv6 + // packet might have a link-layer header with 24, 28, or 30 + // as the AF_ value." As we may be reading traces captured on + // platforms other than what we're running on, we accept them + // all here. + if ( protocol != AF_INET + && protocol != AF_INET6 + && protocol != 24 + && protocol != 28 + && protocol != 30 ) + { + Weird("non_ip_packet_in_null_transport", ¤t_packet); + goto done; + } + + break; + } + + case DLT_EN10MB: + { + // Get protocol being carried from the ethernet frame. + protocol = (data[12] << 8) + data[13]; + + switch ( protocol ) + { + // MPLS carried over the ethernet frame. + case 0x8847: + // Remove the data link layer and denote a + // header size of zero before the IP header. 
+ have_mpls = true; + data += GetLinkHeaderSize(props.link_type); + pkt_hdr_size = 0; + break; + + // VLAN carried over the ethernet frame. + case 0x8100: + data += GetLinkHeaderSize(props.link_type); + + // Check for MPLS in VLAN. + if ( ((data[2] << 8) + data[3]) == 0x8847 ) + have_mpls = true; + + data += 4; // Skip the vlan header + pkt_hdr_size = 0; + + // Check for 802.1ah (Q-in-Q) containing IP. + // Only do a second layer of vlan tag + // stripping because there is no + // specification that allows for deeper + // nesting. + if ( ((data[2] << 8) + data[3]) == 0x0800 ) + data += 4; + + break; + + // PPPoE carried over the ethernet frame. + case 0x8864: + data += GetLinkHeaderSize(props.link_type); + protocol = (data[6] << 8) + data[7]; + data += 8; // Skip the PPPoE session and PPP header + pkt_hdr_size = 0; + + if ( protocol != 0x0021 && protocol != 0x0057 ) + { + // Neither IPv4 nor IPv6. + Weird("non_ip_packet_in_pppoe_encapsulation", ¤t_packet); + goto done; + } + break; + } + + break; + } + + case DLT_PPP_SERIAL: + { + // Get PPP protocol. + protocol = (data[2] << 8) + data[3]; + + if ( protocol == 0x0281 ) + { + // MPLS Unicast. Remove the data link layer and + // denote a header size of zero before the IP header. + have_mpls = true; + data += GetLinkHeaderSize(props.link_type); + pkt_hdr_size = 0; + } + + else if ( protocol != 0x0021 && protocol != 0x0057 ) + { + // Neither IPv4 nor IPv6. + Weird("non_ip_packet_in_ppp_encapsulation", ¤t_packet); + goto done; + } + break; + } + } + + if ( have_mpls ) + { + // Skip the MPLS label stack. + bool end_of_stack = false; + + while ( ! end_of_stack ) + { + end_of_stack = *(data + 2) & 0x01; + data += 4; + } + } + + if ( pseudo_realtime ) + { + current_pseudo = CheckPseudoTime(); + net_packet_dispatch(current_pseudo, current_packet.hdr, data, pkt_hdr_size, this); + if ( ! 
first_wallclock ) + first_wallclock = current_time(true); + } + + else + net_packet_dispatch(current_packet.ts, current_packet.hdr, data, pkt_hdr_size, this); + +done: + have_packet = 0; + DoneWithPacket(); + } + +const char* PktSrc::Tag() + { + return "PktSrc"; + } + +bool PktSrc::ExtractNextPacketInternal() + { + if ( have_packet ) + return true; + + have_packet = false; + + // Don't return any packets if processing is suspended (except for the + // very first packet which we need to set up times). + if ( net_is_processing_suspended() && first_timestamp ) + { + SetIdle(true); + return 0; + } + + if ( pseudo_realtime ) + current_wallclock = current_time(true); + + if ( ExtractNextPacket(¤t_packet) ) + { + if ( ! first_timestamp ) + first_timestamp = current_packet.ts; + + SetIdle(false); + have_packet = true; + return 1; + } + + if ( pseudo_realtime && using_communication && ! IsOpen() ) + { + // Source has gone dry, we're done. + if ( remote_trace_sync_interval ) + remote_serializer->SendFinalSyncPoint(); + else + remote_serializer->Terminate(); + } + + SetIdle(true); + return 0; + } + +bool PktSrc::PrecompileBPFFilter(int index, const std::string& filter) + { + if ( index < 0 ) + return false; + + char errbuf[PCAP_ERRBUF_SIZE]; + + // Compile filter. + BPF_Program* code = new BPF_Program(); + + if ( ! code->Compile(SnapLen(), LinkType(), filter.c_str(), Netmask(), errbuf, sizeof(errbuf)) ) + { + string msg = fmt("cannot compile BPF filter \"%s\"", filter.c_str()); + + if ( *errbuf ) + msg += ": " + string(errbuf); + + Error(msg); + + delete code; + return 0; + } + + // Store it in hash. 
+ HashKey* hash = new HashKey(HashKey(bro_int_t(index))); + BPF_Program* oldcode = filters.Lookup(hash); + if ( oldcode ) + delete oldcode; + + filters.Insert(hash, code); + delete hash; + + return 1; + } + +BPF_Program* PktSrc::GetBPFFilter(int index) + { + if ( index < 0 ) + return 0; + + HashKey* hash = new HashKey(HashKey(bro_int_t(index))); + BPF_Program* code = filters.Lookup(hash); + delete hash; + return code; + } + +bool PktSrc::ApplyBPFFilter(int index, const struct pcap_pkthdr *hdr, const u_char *pkt) + { + BPF_Program* code = GetBPFFilter(index); + + if ( ! code ) + { + Error(fmt("BPF filter %d not compiled", index)); + Close(); + } + + if ( code->MatchesAnything() ) + return true; + + return pcap_offline_filter(code->GetProgram(), hdr, pkt); + } + +bool PktSrc::GetCurrentPacket(const pcap_pkthdr** hdr, const u_char** pkt) + { + if ( ! have_packet ) + return false; + + *hdr = current_packet.hdr; + *pkt = current_packet.data; + return true; + } diff --git a/src/iosource/PktSrc.h b/src/iosource/PktSrc.h new file mode 100644 index 0000000000..9c05115257 --- /dev/null +++ b/src/iosource/PktSrc.h @@ -0,0 +1,417 @@ +// See the file "COPYING" in the main distribution directory for copyright. + +#ifndef IOSOURCE_PKTSRC_PKTSRC_H +#define IOSOURCE_PKTSRC_PKTSRC_H + +#include "IOSource.h" +#include "BPF_Program.h" +#include "Dict.h" + +declare(PDict,BPF_Program); + +namespace iosource { + +/** + * Base class for packet sources. + */ +class PktSrc : public IOSource { +public: + /** + * Struct for returning statistics on a packet source. + */ + struct Stats { + /** + * Packets received by source after filtering (w/o drops). + */ + unsigned int received; + + /** + * Packets dropped by source. + */ + unsigned int dropped; // pkts dropped + + /** + * Total number of packets on link before filtering. + * Optional, can be left unset if not available. + */ + unsigned int link; + + Stats() { received = dropped = link = 0; } + }; + + /** + * Constructor. 
+ */ + PktSrc(); + + /** + * Destructor. + */ + virtual ~PktSrc(); + + /** + * Returns the path associated with the source. This is the interface + * name for live source, and a filename for offline sources. + */ + const std::string& Path() const; + + /** + * Returns true if this is a live source. + */ + bool IsLive() const; + + /** + * Returns the link type of the source. + */ + int LinkType() const; + + /** + * Returns the netmask associated with the source, or \c + * PCAP_NETMASK_UNKNOWN if unknown. + */ + uint32 Netmask() const; + + /** + * Returns true if the source has flagged an error. + */ + bool IsError() const; + + /** + * If the source encountered an error, returns a corresponding error + * message. Returns an empty string otherwise. + */ + const char* ErrorMsg() const; + + /** + * Returns the size of the link-layer header for this source. + */ + int HdrSize() const; + + /** + * Returns the snap length for this source. + */ + int SnapLen() const; + + /** + * In pseudo-realtime mode, returns the logical timestamp of the + * current packet. Undefined if not running pseudo-realtime mode. + */ + double CurrentPacketTimestamp(); + + /** + * In pseudo-realtime mode, returns the wall clock time associated + * with current packet. Undefined if not running pseudo-realtime + * mode. + */ + double CurrentPacketWallClock(); + + /** + * Signals packet source that processing is going to be continued + * after previous suspension. + */ + void ContinueAfterSuspend(); + + /** + * Precompiles a BPF filter and associates the given index with it. + * The compiled filter will be then available via \a GetBPFFilter(). + * + * This is primarily a helper for packet source implementation that + * want to apply BPF filtering to their packets. + * + * @param index The index to associate with the filter. + * + * @param BPF filter The filter string to precompile. + * + * @return True on success, false if a problem occurred. 
+ */ + bool PrecompileBPFFilter(int index, const std::string& filter); + + /** + * Returns the precompiled BPF filter associated with a given index, + * if any, as compiled by \a PrecompileBPFFilter(). + * + * This is primarily a helper for packet source implementation that + * want to apply BPF filtering to their packets. + * + * @return The BPF filter associated, or null if none has been + * (successfully) compiled. + */ + BPF_Program* GetBPFFilter(int index); + + /** + * Applies a precompiled BPF filter to a packet. This will close the + * source with an error message if no filter with that index has been + * compiled. + * + * This is primarily a helper for packet source implementation that + * want to apply BPF filtering to their packets. + * + * @param index The index of the filter to apply. + * + * @param hdr The header of the packet to filter. + * + * @param pkt The content of the packet to filter. + * + * @return True if it maches. */ + bool ApplyBPFFilter(int index, const struct pcap_pkthdr *hdr, const u_char *pkt); + + /** + * Returns the packet currently being processed, if available. + * + * @param hdr A pointer to pass the header of the current packet back. + * + * @param pkt A pointer to pass the content of the current packet + * back. + * + * @return True if the current packet is available, or false if not. + */ + bool GetCurrentPacket(const pcap_pkthdr** hdr, const u_char** pkt); + + // PacketSource interace for derived classes to override. + + /** + * Precompiles a filter and associates a given index with it. The + * filter syntax is defined by the packet source's implenentation. + * + * Derived classes must implement this to implement their filtering. + * If they want to use BPF but don't support it natively, they can + * call the corresponding helper method provided by \a PktSrc. + * + * @param index The index to associate with the filter + * + * @param filter The filter string to precompile. 
+ * + * @return True on success, false if a problem occurred or filtering + * is not supported. + */ + virtual bool PrecompileFilter(int index, const std::string& filter) = 0; + + /** + * Activates a precompiled filter with the given index. + * + * Derived classes must implement this to implement their filtering. + * If they want to use BPF but don't support it natively, they can + * call the corresponding helper method provided by \a PktSrc. + * + * @param index The index of the filter to activate. + * + * @return True on success, false if a problem occurred or the + * filtering is not supported. + */ + virtual bool SetFilter(int index) = 0; + + /** + * Returns current statistics about the source. + * + * Derived classes must implement this method. + * + * @param stats A statistics structure that the method fill out. + */ + virtual void Statistics(Stats* stats) = 0; + + /** + * Helper method to return the header size for a given link tyoe. + * + * @param link_type The link tyoe. + * + * @return The header size in bytes. + */ + static int GetLinkHeaderSize(int link_type); + +protected: + friend class Manager; + + // Methods to use by derived classes. + + /** + * Structure to pass back information about the packet source to the + * base class. Derived class pass an instance of this to \a Opened(). + */ + struct Properties { + /** + * The path associated with the source. This is the interface + * name for live source, and a filename for offline sources. + */ + std::string path; + + /** + * A file descriptor suitable to use with \a select() for + * determining if there's input available from this source. + */ + int selectable_fd; + + /** + * The link type for packets from this source. + */ + int link_type; + + /** + * The size of the link-layer header for packets from this + * source. \a GetLinkHeaderSize() may be used to derive this + * value. + */ + int hdr_size; + + /** + * The netmask associated with the source, or \c + * PCAP_NETMASK_UNKNOWN if unknown. 
+ */ + uint32 netmask; + + /** + * True if the source is reading live inout, false for + * working offline. + */ + bool is_live; + + Properties() + { + netmask = PCAP_NETMASK_UNKNOWN; + } + }; + + /** + * Structure describing a packet. + */ + struct Packet { + /** + * Time associated with the packet. + */ + double ts; + + /** + * The pcap header associated with the packet. + */ + const struct ::pcap_pkthdr* hdr; + + /** + * The full content of the packet. + */ + const u_char* data; + }; + + /** + * Called from the implementations of \a Open() to signal that the + * source has been successully opened. + * + * @param props A properties instance describing the now open source. + */ + void Opened(const Properties& props); + + /** + * Called from the implementations of \a Close() to signal that the + * source has been closed. + */ + void Closed(); + + /** + * Can be called from derived classes to send an informational + * message to the user. + * + * @param msg The message to pass on. + */ + void Info(const std::string& msg); + + /** + * Can be called from derived classes to flag send an error. + * + * @param msg The message going with the error. + */ + void Error(const std::string& msg); + + /** + * Can be called from derived classes to flah a "weird" situation. + * + * @param msg The message to pass on. + * + * @param pkt The packet associated with the weird, or null if none. + */ + void Weird(const std::string& msg, const Packet* pkt); + + /** + * Can be called from derived classes to flag an internal error, + * which will abort execution. + * + * @param msg The message to pass on. + */ + void InternalError(const std::string& msg); + + // PktSrc interface for derived classes to implement. + + /** + * Called by the manager system to open the source. + * + * Derived classes must implement this method. If successful, the + * implementation must call \a Opened(); if not, it must call Error() + * with a corresponding message. 
+ */ + virtual void Open() = 0; + + /** + * Called by the manager system to close the source. + * + * Derived classes must implement this method. If successful, the + * implementation must call \a Closed(); if not, it must call Error() + * with a corresponding message. + */ + virtual void Close() = 0; + + /** + * Provides the next packet from the source. + * + * @param pkt The packet structure to fill in with the packet's + * information. The callee keep ownership of the data but must + * guaranetee that it stays available at least until \a + * DoneWithPacket() is called. It is guaranteed that no two calls to + * this method will hapen with \a DoneWithPacket() in between. + * + * @return True if a packet is available and *pkt* filled in. False + * if not packet is available or an error occured (which must be + * flageed via Error()). + */ + virtual bool ExtractNextPacket(Packet* pkt) = 0; + + /** + * Signals that the data of previously extracted packet will no + * longer be needed. + */ + virtual void DoneWithPacket() = 0; + +private: + // Checks if the current packet has a pseudo-time <= current_time. If + // yes, returns pseudo-time, otherwise 0. + double CheckPseudoTime(); + + // Internal helper for ExtractNextPacket(). + bool ExtractNextPacketInternal(); + + // IOSource interface implementation. + virtual void Init(); + virtual void Done(); + virtual void GetFds(iosource::FD_Set* read, iosource::FD_Set* write, + iosource::FD_Set* except); + virtual double NextTimestamp(double* local_network_time); + virtual void Process(); + virtual const char* Tag(); + + Properties props; + + bool have_packet; + Packet current_packet; + + // For BPF filtering support. + PDict(BPF_Program) filters; + + // Only set in pseudo-realtime mode. 
+ double first_timestamp; + double first_wallclock; + double current_wallclock; + double current_pseudo; + double next_sync_point; // For trace synchronziation in pseudo-realtime + + std::string errbuf; +}; + +} + +#endif diff --git a/src/iosource/pcap.bif b/src/iosource/pcap.bif new file mode 100644 index 0000000000..ee4e1e6c06 --- /dev/null +++ b/src/iosource/pcap.bif @@ -0,0 +1,104 @@ + +## Precompiles a PCAP filter and binds it to a given identifier. +## +## id: The PCAP identifier to reference the filter *s* later on. +## +## s: The PCAP filter. See ``man tcpdump`` for valid expressions. +## +## Returns: True if *s* is valid and precompiles successfully. +## +## .. bro:see:: install_pcap_filter +## install_src_addr_filter +## install_src_net_filter +## uninstall_src_addr_filter +## uninstall_src_net_filter +## install_dst_addr_filter +## install_dst_net_filter +## uninstall_dst_addr_filter +## uninstall_dst_net_filter +## pcap_error +function precompile_pcap_filter%(id: PcapFilterID, s: string%): bool + %{ + bool success = true; + + const iosource::Manager::PktSrcList& pkt_srcs(iosource_mgr->GetPktSrcs()); + + for ( iosource::Manager::PktSrcList::const_iterator i = pkt_srcs.begin(); + i != pkt_srcs.end(); i++ ) + { + iosource::PktSrc* ps = *i; + + if ( ! ps->PrecompileFilter(id->ForceAsInt(), + s->CheckString()) ) + success = false; + } + + return new Val(success, TYPE_BOOL); + %} + +## Installs a PCAP filter that has been precompiled with +## :bro:id:`precompile_pcap_filter`. +## +## id: The PCAP filter id of a precompiled filter. +## +## Returns: True if the filter associated with *id* has been installed +## successfully. +## +## .. 
bro:see:: precompile_pcap_filter +## install_src_addr_filter +## install_src_net_filter +## uninstall_src_addr_filter +## uninstall_src_net_filter +## install_dst_addr_filter +## install_dst_net_filter +## uninstall_dst_addr_filter +## uninstall_dst_net_filter +## pcap_error +function install_pcap_filter%(id: PcapFilterID%): bool + %{ + bool success = true; + + const iosource::Manager::PktSrcList& pkt_srcs(iosource_mgr->GetPktSrcs()); + + for ( iosource::Manager::PktSrcList::const_iterator i = pkt_srcs.begin(); + i != pkt_srcs.end(); i++ ) + { + iosource::PktSrc* ps = *i; + + if ( ! ps->SetFilter(id->ForceAsInt()) ) + success = false; + } + + return new Val(success, TYPE_BOOL); + %} + +## Returns a string representation of the last PCAP error. +## +## Returns: A descriptive error message of the PCAP function that failed. +## +## .. bro:see:: precompile_pcap_filter +## install_pcap_filter +## install_src_addr_filter +## install_src_net_filter +## uninstall_src_addr_filter +## uninstall_src_net_filter +## install_dst_addr_filter +## install_dst_net_filter +## uninstall_dst_addr_filter +## uninstall_dst_net_filter +function pcap_error%(%): string + %{ + const iosource::Manager::PktSrcList& pkt_srcs(iosource_mgr->GetPktSrcs()); + + for ( iosource::Manager::PktSrcList::const_iterator i = pkt_srcs.begin(); + i != pkt_srcs.end(); i++ ) + { + iosource::PktSrc* ps = *i; + + const char* err = ps->ErrorMsg(); + if ( *err ) + return new StringVal(err); + } + + return new StringVal("no error"); + %} diff --git a/src/iosource/pcap/CMakeLists.txt b/src/iosource/pcap/CMakeLists.txt new file mode 100644 index 0000000000..1c57bb6ac9 --- /dev/null +++ b/src/iosource/pcap/CMakeLists.txt @@ -0,0 +1,8 @@ + +include(BroPlugin) + +include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) + +bro_plugin_begin(Bro Pcap) +bro_plugin_cc(Source.cc Dumper.cc Plugin.cc) +bro_plugin_end() diff --git a/src/iosource/pcap/Dumper.cc b/src/iosource/pcap/Dumper.cc new file mode 
100644 index 0000000000..5d0b5e599b --- /dev/null +++ b/src/iosource/pcap/Dumper.cc @@ -0,0 +1,112 @@ +// See the file "COPYING" in the main distribution directory for copyright. + +#include +#include + +#include "Dumper.h" +#include "../PktSrc.h" +#include "../../Net.h" + +using namespace iosource::pcap; + +PcapDumper::PcapDumper(const std::string& path, bool arg_append) + { + append = arg_append; + props.path = path; + dumper = 0; + pd = 0; + } + +PcapDumper::~PcapDumper() + { + } + +void PcapDumper::Open() + { + int linktype = -1; + + pd = pcap_open_dead(DLT_EN10MB, snaplen); + if ( ! pd ) + { + Error("error for pcap_open_dead"); + return; + } + + if ( props.path.empty() ) + { + Error("no filename given"); + return; + } + + struct stat s; + int exists = 0; + + if ( append ) + { + // See if output file already exists (and is non-empty). + exists = stat(props.path.c_str(), &s); ; + + if ( exists < 0 && errno != ENOENT ) + { + Error(fmt("can't stat file %s: %s", props.path.c_str(), strerror(errno))); + return; + } + } + + if ( ! append || exists < 0 || s.st_size == 0 ) + { + // Open new file. + dumper = pcap_dump_open(pd, props.path.c_str()); + if ( ! dumper ) + { + Error(pcap_geterr(pd)); + return; + } + } + + else + { + // Old file and we need to append, which, unfortunately, + // is not supported by libpcap. So, we have to hack a + // little bit, knowing that pcap_dumpter_t is, in fact, + // a FILE ... :-( + dumper = (pcap_dumper_t*) fopen(props.path.c_str(), "a"); + if ( ! dumper ) + { + Error(fmt("can't open dump %s: %s", props.path.c_str(), strerror(errno))); + return; + } + } + + props.open_time = network_time; + props.hdr_size = PktSrc::GetLinkHeaderSize(pcap_datalink(pd)); + Opened(props); + } + +void PcapDumper::Close() + { + if ( ! dumper ) + return; + + pcap_dump_close(dumper); + pcap_close(pd); + dumper = 0; + pd = 0; + + Closed(); + } + +bool PcapDumper::Dump(const Packet* pkt) + { + if ( ! 
dumper ) + return false; + + pcap_dump((u_char*) dumper, pkt->hdr, pkt->data); + + return true; + } + +iosource::PktDumper* PcapDumper::Instantiate(const std::string& path, bool append) + { + return new PcapDumper(path, append); + } diff --git a/src/iosource/pcap/Dumper.h b/src/iosource/pcap/Dumper.h new file mode 100644 index 0000000000..7950912d56 --- /dev/null +++ b/src/iosource/pcap/Dumper.h @@ -0,0 +1,41 @@ +// See the file in the main distribution directory for copyright. + +#ifndef IOSOURCE_PKTSRC_PCAP_DUMPER_H +#define IOSOURCE_PKTSRC_PCAP_DUMPER_H + +extern "C" { +#include +} + +#include "../PktDumper.h" + +namespace iosource { +namespace pcap { + +class PcapDumper : public PktDumper { +public: + PcapDumper(const std::string& path, bool append); + virtual ~PcapDumper(); + + static PktDumper* Instantiate(const std::string& path, bool appen); + +protected: + // PktDumper interface. + virtual void Open(); + virtual void Close(); + virtual bool Dump(const Packet* pkt); + +private: + Properties props; + + bool append; + pcap_dumper_t* dumper; + pcap_t* pd; +}; + +} +} + +#endif + + diff --git a/src/iosource/pcap/Plugin.cc b/src/iosource/pcap/Plugin.cc new file mode 100644 index 0000000000..f0490e6e3d --- /dev/null +++ b/src/iosource/pcap/Plugin.cc @@ -0,0 +1,27 @@ +// See the file in the main distribution directory for copyright. 
+ +#include "plugin/Plugin.h" + +#include "Source.h" +#include "Dumper.h" + +namespace plugin { +namespace Bro_Pcap { + +class Plugin : public plugin::Plugin { +public: + plugin::Configuration Configure() + { + AddComponent(new ::iosource::PktSrcComponent("PcapReader", "pcap", ::iosource::PktSrcComponent::BOTH, ::iosource::pcap::PcapSource::Instantiate)); + AddComponent(new ::iosource::PktDumperComponent("PcapWriter", "pcap", ::iosource::pcap::PcapDumper::Instantiate)); + + plugin::Configuration config; + config.name = "Bro::Pcap"; + config.description = "Packet aquisition via libpcap"; + return config; + } +} plugin; + +} +} + diff --git a/src/iosource/pcap/Source.cc b/src/iosource/pcap/Source.cc new file mode 100644 index 0000000000..e96933aaa6 --- /dev/null +++ b/src/iosource/pcap/Source.cc @@ -0,0 +1,272 @@ +// See the file in the main distribution directory for copyright. + +#include + +#include "config.h" + +#include "Source.h" + +#ifdef HAVE_PCAP_INT_H +#include +#endif + +using namespace iosource::pcap; + +PcapSource::~PcapSource() + { + Close(); + } + +PcapSource::PcapSource(const std::string& path, bool is_live) + { + props.path = path; + props.is_live = is_live; + last_data = 0; + } + +void PcapSource::Open() + { + if ( props.is_live ) + OpenLive(); + else + OpenOffline(); + } + +void PcapSource::Close() + { + if ( ! pd ) + return; + + pcap_close(pd); + pd = 0; + last_data = 0; + + Closed(); + } + +void PcapSource::OpenLive() + { + char errbuf[PCAP_ERRBUF_SIZE]; + char tmp_errbuf[PCAP_ERRBUF_SIZE]; + + // Determine interface if not specified. + if ( props.path.empty() ) + props.path = pcap_lookupdev(tmp_errbuf); + + if ( props.path.empty() ) + { + safe_snprintf(errbuf, sizeof(errbuf), + "pcap_lookupdev: %s", tmp_errbuf); + Error(errbuf); + return; + } + + // Determine network and netmask. 
+ uint32 net; + if ( pcap_lookupnet(props.path.c_str(), &net, &props.netmask, tmp_errbuf) < 0 ) + { + // ### The lookup can fail if no address is assigned to + // the interface; and libpcap doesn't have any useful notion + // of error codes, just error std::strings - how bogus - so we + // just kludge around the error :-(. + // sprintf(errbuf, "pcap_lookupnet %s", tmp_errbuf); + // return; + props.netmask = 0xffffff00; + } + + // We use the smallest time-out possible to return almost immediately if + // no packets are available. (We can't use set_nonblocking() as it's + // broken on FreeBSD: even when select() indicates that we can read + // something, we may get nothing if the store buffer hasn't filled up + // yet.) + pd = pcap_open_live(props.path.c_str(), SnapLen(), 1, 1, tmp_errbuf); + + if ( ! pd ) + { + Error(tmp_errbuf); + return; + } + + // ### This needs autoconf'ing. +#ifdef HAVE_PCAP_INT_H + Info(fmt("pcap bufsize = %d\n", ((struct pcap *) pd)->bufsize)); +#endif + +#ifdef HAVE_LINUX + if ( pcap_setnonblock(pd, 1, tmp_errbuf) < 0 ) + { + PcapError(); + return; + } +#endif + + props.selectable_fd = pcap_fileno(pd); + + SetHdrSize(); + + if ( ! pd ) + // Was closed, couldn't get header size. + return; + + props.is_live = true; + + Opened(props); + } + +void PcapSource::OpenOffline() + { + char errbuf[PCAP_ERRBUF_SIZE]; + + pd = pcap_open_offline(props.path.c_str(), errbuf); + + if ( ! pd ) + { + Error(errbuf); + return; + } + + SetHdrSize(); + + if ( ! pd ) + // Was closed, unknown link layer type. + return; + + props.selectable_fd = fileno(pcap_file(pd)); + + if ( props.selectable_fd < 0 ) + InternalError("OS does not support selectable pcap fd"); + + props.is_live = false; + Opened(props); + } + +bool PcapSource::ExtractNextPacket(Packet* pkt) + { + if ( ! pd ) + return false; + + const u_char* data = pcap_next(pd, ¤t_hdr); + + if ( ! data ) + { + // Source has gone dry. If it's a network interface, this just means + // it's timed out. 
If it's a file, though, then the file has been + // exhausted. + if ( ! props.is_live ) + Close(); + + return false; + } + + pkt->ts = current_hdr.ts.tv_sec + double(current_hdr.ts.tv_usec) / 1e6; + pkt->hdr = ¤t_hdr; + pkt->data = last_data = data; + + if ( current_hdr.len == 0 || current_hdr.caplen == 0 ) + { + Weird("empty_pcap_header", pkt); + return false; + } + + last_hdr = current_hdr; + last_data = data; + ++stats.received; + return true; + } + +void PcapSource::DoneWithPacket() + { + // Nothing to do. + } + +bool PcapSource::PrecompileFilter(int index, const std::string& filter) + { + return PktSrc::PrecompileBPFFilter(index, filter); + } + +bool PcapSource::SetFilter(int index) + { + if ( ! pd ) + return true; // Prevent error message + + char errbuf[PCAP_ERRBUF_SIZE]; + + BPF_Program* code = GetBPFFilter(index); + + if ( ! code ) + { + safe_snprintf(errbuf, sizeof(errbuf), + "No precompiled pcap filter for index %d", + index); + Error(errbuf); + return false; + } + + if ( pcap_setfilter(pd, code->GetProgram()) < 0 ) + { + PcapError(); + return false; + } + +#ifndef HAVE_LINUX + // Linux doesn't clear counters when resetting filter. + stats.received = stats.dropped = stats.link = 0; +#endif + + return true; + } + +void PcapSource::Statistics(Stats* s) + { + char errbuf[PCAP_ERRBUF_SIZE]; + + if ( ! (props.is_live && pd) ) + s->received = s->dropped = s->link = 0; + + else + { + struct pcap_stat pstat; + if ( pcap_stats(pd, &pstat) < 0 ) + { + PcapError(); + s->received = s->dropped = s->link = 0; + } + + else + { + s->dropped = pstat.ps_drop; + s->link = pstat.ps_recv; + } + } + + s->received = stats.received; + + if ( ! props.is_live ) + s->dropped = 0; + } + +void PcapSource::PcapError() + { + if ( pd ) + Error(fmt("pcap_error: %s", pcap_geterr(pd))); + else + Error("pcap_error: not open"); + + Close(); + } + +void PcapSource::SetHdrSize() + { + if ( ! 
pd ) + return; + + char errbuf[PCAP_ERRBUF_SIZE]; + + props.link_type = pcap_datalink(pd); + props.hdr_size = GetLinkHeaderSize(props.link_type); + } + +iosource::PktSrc* PcapSource::Instantiate(const std::string& path, bool is_live) + { + return new PcapSource(path, is_live); + } diff --git a/src/iosource/pcap/Source.h b/src/iosource/pcap/Source.h new file mode 100644 index 0000000000..f627e30afa --- /dev/null +++ b/src/iosource/pcap/Source.h @@ -0,0 +1,47 @@ +// See the file "COPYING" in the main distribution directory for copyright. + +#ifndef IOSOURCE_PKTSRC_PCAP_SOURCE_H +#define IOSOURCE_PKTSRC_PCAP_SOURCE_H + +#include "../PktSrc.h" + +namespace iosource { +namespace pcap { + +class PcapSource : public iosource::PktSrc { +public: + PcapSource(const std::string& path, bool is_live); + virtual ~PcapSource(); + + static PktSrc* Instantiate(const std::string& path, bool is_live); + +protected: + // PktSrc interface. + virtual void Open(); + virtual void Close(); + virtual bool ExtractNextPacket(Packet* pkt); + virtual void DoneWithPacket(); + virtual bool PrecompileFilter(int index, const std::string& filter); + virtual bool SetFilter(int index); + virtual void Statistics(Stats* stats); + +private: + void OpenLive(); + void OpenOffline(); + void PcapError(); + void SetHdrSize(); + + Properties props; + Stats stats; + + pcap_t *pd; + + struct pcap_pkthdr current_hdr; + struct pcap_pkthdr last_hdr; + const u_char* last_data; +}; + +} +} + +#endif diff --git a/src/logging/CMakeLists.txt b/src/logging/CMakeLists.txt new file mode 100644 index 0000000000..f7ed586014 --- /dev/null +++ b/src/logging/CMakeLists.txt @@ -0,0 +1,23 @@ + +include(BroSubdir) + +include_directories(BEFORE + ${CMAKE_CURRENT_SOURCE_DIR} + ${CMAKE_CURRENT_BINARY_DIR} +) + +add_subdirectory(writers) + +set(logging_SRCS + Component.cc + Manager.cc + WriterBackend.cc + WriterFrontend.cc + Tag.cc +) + +bif_target(logging.bif) + +bro_add_subdir_library(logging ${logging_SRCS} ${BIF_OUTPUT_CC}) 
+add_dependencies(bro_logging generate_outputs) + diff --git a/src/logging/Component.cc b/src/logging/Component.cc new file mode 100644 index 0000000000..3af29fd96f --- /dev/null +++ b/src/logging/Component.cc @@ -0,0 +1,26 @@ +// See the file "COPYING" in the main distribution directory for copyright. + +#include "Component.h" +#include "Manager.h" +#include "../Desc.h" +#include "../util.h" + +using namespace logging; + +Component::Component(const std::string& name, factory_callback arg_factory) + : plugin::Component(plugin::component::WRITER, name) + { + factory = arg_factory; + + log_mgr->RegisterComponent(this, "WRITER_"); + } + +Component::~Component() + { + } + +void Component::DoDescribe(ODesc* d) const + { + d->Add("Log::WRITER_"); + d->Add(CanonicalName()); + } diff --git a/src/logging/Component.h b/src/logging/Component.h new file mode 100644 index 0000000000..21e114b36c --- /dev/null +++ b/src/logging/Component.h @@ -0,0 +1,59 @@ +// See the file "COPYING" in the main distribution directory for copyright. + +#ifndef LOGGING_COMPONENT_H +#define LOGGING_COMPONENT_H + +#include "Tag.h" +#include "plugin/Component.h" +#include "plugin/TaggedComponent.h" + +namespace logging { + +class WriterFrontend; +class WriterBackend; + +/** + * Component description for plugins providing log writers. + */ +class Component : public plugin::Component, + public plugin::TaggedComponent { +public: + typedef WriterBackend* (*factory_callback)(WriterFrontend* frontend); + + /** + * Constructor. + * + * @param name The name of the provided writer. This name is used + * across the system to identify the writer. + * + * @param factory A factory function to instantiate instances of the + * writers's class, which must be derived directly or indirectly from + * logging::WriterBackend. This is typically a static \c Instatiate() + * method inside the class that just allocates and returns a new + * instance. 
+ */ + Component(const std::string& name, factory_callback factory); + + /** + * Destructor. + */ + ~Component(); + + /** + * Returns the writer's factory function. + */ + factory_callback Factory() const { return factory; } + +protected: + /** + * Overriden from plugin::Component. + */ + virtual void DoDescribe(ODesc* d) const; + +private: + factory_callback factory; +}; + +} + +#endif diff --git a/src/logging/Manager.cc b/src/logging/Manager.cc index 55e0fddb5a..1fe5db3b26 100644 --- a/src/logging/Manager.cc +++ b/src/logging/Manager.cc @@ -14,48 +14,10 @@ #include "Manager.h" #include "WriterFrontend.h" #include "WriterBackend.h" - -#include "writers/Ascii.h" -#include "writers/None.h" - -#ifdef USE_ELASTICSEARCH -#include "writers/ElasticSearch.h" -#endif - -#ifdef USE_DATASERIES -#include "writers/DataSeries.h" -#endif - -#include "writers/SQLite.h" +#include "logging.bif.h" using namespace logging; -// Structure describing a log writer type. -struct WriterDefinition { - bro_int_t type; // The type. - const char *name; // Descriptive name for error messages. - bool (*init)(); // An optional one-time initialization function. - WriterBackend* (*factory)(WriterFrontend* frontend); // A factory function creating instances. -}; - -// Static table defining all availabel log writers. -WriterDefinition log_writers[] = { - { BifEnum::Log::WRITER_NONE, "None", 0, writer::None::Instantiate }, - { BifEnum::Log::WRITER_ASCII, "Ascii", 0, writer::Ascii::Instantiate }, - { BifEnum::Log::WRITER_SQLITE, "SQLite", 0, writer::SQLite::Instantiate }, - -#ifdef USE_ELASTICSEARCH - { BifEnum::Log::WRITER_ELASTICSEARCH, "ElasticSearch", 0, writer::ElasticSearch::Instantiate }, -#endif - -#ifdef USE_DATASERIES - { BifEnum::Log::WRITER_DATASERIES, "DataSeries", 0, writer::DataSeries::Instantiate }, -#endif - - // End marker, don't touch. 
- { BifEnum::Log::WRITER_DEFAULT, "None", 0, (WriterBackend* (*)(WriterFrontend* frontend))0 } -}; - struct Manager::Filter { string name; EnumVal* id; @@ -142,6 +104,7 @@ Manager::Stream::~Stream() } Manager::Manager() + : plugin::ComponentManager("Log", "Writer") { rotations_pending = 0; } @@ -152,64 +115,17 @@ Manager::~Manager() delete *s; } -list Manager::SupportedFormats() +WriterBackend* Manager::CreateBackend(WriterFrontend* frontend, EnumVal* tag) { - list formats; + Component* c = Lookup(tag); - for ( WriterDefinition* ld = log_writers; ld->type != BifEnum::Log::WRITER_DEFAULT; ++ld ) - formats.push_back(ld->name); - - return formats; - } - -WriterBackend* Manager::CreateBackend(WriterFrontend* frontend, bro_int_t type) - { - WriterDefinition* ld = log_writers; - - while ( true ) + if ( ! c ) { - if ( ld->type == BifEnum::Log::WRITER_DEFAULT ) - { - reporter->Error("unknown writer type requested"); - return 0; - } - - if ( ld->type != type ) - { - // Not the right one. - ++ld; - continue; - } - - // If the writer has an init function, call it. - if ( ld->init ) - { - if ( (*ld->init)() ) - // Clear the init function so that we won't - // call it again later. - ld->init = 0; - else - { - // Init failed, disable by deleting factory - // function. - ld->factory = 0; - - reporter->Error("initialization of writer %s failed", ld->name); - return 0; - } - } - - if ( ! ld->factory ) - // Oops, we can't instantiate this guy. - return 0; - - // All done. 
- break; + reporter->Error("unknown writer type requested"); + return 0; } - assert(ld->factory); - - WriterBackend* backend = (*ld->factory)(frontend); + WriterBackend* backend = (*c->Factory())(frontend); assert(backend); return backend; @@ -1234,7 +1150,7 @@ void Manager::SendAllWritersTo(RemoteSerializer::PeerID peer) { WriterFrontend* writer = i->second->writer; - EnumVal writer_val(i->first.first, BifType::Enum::Log::Writer); + EnumVal writer_val(i->first.first, internal_type("Log::Writer")->AsEnumType()); remote_serializer->SendLogCreateWriter(peer, (*s)->id, &writer_val, *i->second->info, diff --git a/src/logging/Manager.h b/src/logging/Manager.h index 61f6dcd8a7..b8264927a3 100644 --- a/src/logging/Manager.h +++ b/src/logging/Manager.h @@ -6,9 +6,12 @@ #define LOGGING_MANAGER_H #include "../Val.h" +#include "../Tag.h" #include "../EventHandler.h" #include "../RemoteSerializer.h" +#include "../plugin/ComponentManager.h" +#include "Component.h" #include "WriterBackend.h" class SerializationFormat; @@ -23,7 +26,7 @@ class RotationFinishedMessage; /** * Singleton class for managing log streams. */ -class Manager { +class Manager : public plugin::ComponentManager { public: /** * Constructor. @@ -154,11 +157,6 @@ public: */ void Terminate(); - /** - * Returns a list of supported output formats. - */ - static list SupportedFormats(); - protected: friend class WriterFrontend; friend class RotationFinishedMessage; @@ -168,7 +166,7 @@ protected: // Instantiates a new WriterBackend of the given type (note that // doing so creates a new thread!). - WriterBackend* CreateBackend(WriterFrontend* frontend, bro_int_t type); + WriterBackend* CreateBackend(WriterFrontend* frontend, EnumVal* tag); //// Function also used by the RemoteSerializer. 
diff --git a/src/logging/Tag.cc b/src/logging/Tag.cc new file mode 100644 index 0000000000..dea3b41819 --- /dev/null +++ b/src/logging/Tag.cc @@ -0,0 +1,22 @@ +// See the file "COPYING" in the main distribution directory for copyright. + +#include "Tag.h" +#include "Manager.h" + +logging::Tag logging::Tag::Error; + +logging::Tag::Tag(type_t type, subtype_t subtype) + : ::Tag(log_mgr->GetTagEnumType(), type, subtype) + { + } + +logging::Tag& logging::Tag::operator=(const logging::Tag& other) + { + ::Tag::operator=(other); + return *this; + } + +EnumVal* logging::Tag::AsEnumVal() const + { + return ::Tag::AsEnumVal(log_mgr->GetTagEnumType()); + } diff --git a/src/logging/Tag.h b/src/logging/Tag.h new file mode 100644 index 0000000000..b5b235154a --- /dev/null +++ b/src/logging/Tag.h @@ -0,0 +1,116 @@ +// See the file "COPYING" in the main distribution directory for copyright. + +#ifndef LOGGING_TAG_H +#define LOGGING_TAG_H + +#include "config.h" +#include "util.h" +#include "../Tag.h" +#include "plugin/TaggedComponent.h" +#include "plugin/ComponentManager.h" + +class EnumVal; + +namespace logging { + +class Manager; +class Component; + +/** + * Class to identify a writer type. + * + * The script-layer analogue is Log::Writer. + */ +class Tag : public ::Tag { +public: + /* + * Copy constructor. + */ + Tag(const Tag& other) : ::Tag(other) {} + + /** + * Default constructor. This initializes the tag with an error value + * that will make \c operator \c bool return false. + */ + Tag() : ::Tag() {} + + /** + * Destructor. + */ + ~Tag() {} + + /** + * Returns false if the tag represents an error value rather than a + * legal writer type. + * TODO: make this conversion operator "explicit" (C++11) or use a + * "safe bool" idiom (not necessary if "explicit" is available), + * otherwise this may allow nonsense/undesired comparison operations. + */ + operator bool() const { return *this != Tag(); } + + /** + * Assignment operator. 
+ */ + Tag& operator=(const Tag& other); + + /** + * Compares two tags for equality. + */ + bool operator==(const Tag& other) const + { + return ::Tag::operator==(other); + } + + /** + * Compares two tags for inequality. + */ + bool operator!=(const Tag& other) const + { + return ::Tag::operator!=(other); + } + + /** + * Compares two tags for less-than relationship. + */ + bool operator<(const Tag& other) const + { + return ::Tag::operator<(other); + } + + /** + * Returns the \c Log::Writer enum that corresponds to this tag. + * The returned value does not have its ref-count increased. + * + * @param etype the script-layer enum type associated with the tag. + */ + EnumVal* AsEnumVal() const; + + static Tag Error; + +protected: + friend class plugin::ComponentManager; + friend class plugin::TaggedComponent; + + /** + * Constructor. + * + * @param type The main type. Note that the \a logging::Manager + * manages the value space internally, so noone else should assign + * any main types. + * + * @param subtype The sub type, which is left to an writer for + * interpretation. By default it's set to zero. + */ + Tag(type_t type, subtype_t subtype = 0); + + /** + * Constructor. + * + * @param val An enum value of script type \c Log::Writer. 
+ */ + Tag(EnumVal* val) : ::Tag(val) {} +}; + +} + +#endif diff --git a/src/logging/WriterBackend.h b/src/logging/WriterBackend.h index f5c74e582c..783a497823 100644 --- a/src/logging/WriterBackend.h +++ b/src/logging/WriterBackend.h @@ -7,6 +7,8 @@ #include "threading/MsgThread.h" +#include "Component.h" + class RemoteSerializer; namespace logging { diff --git a/src/logging/WriterFrontend.cc b/src/logging/WriterFrontend.cc index 09490ce3d1..a075701151 100644 --- a/src/logging/WriterFrontend.cc +++ b/src/logging/WriterFrontend.cc @@ -120,7 +120,7 @@ WriterFrontend::WriterFrontend(const WriterBackend::WriterInfo& arg_info, EnumVa if ( local ) { - backend = log_mgr->CreateBackend(this, writer->AsEnum()); + backend = log_mgr->CreateBackend(this, writer); if ( backend ) backend->Start(); diff --git a/src/logging.bif b/src/logging/logging.bif similarity index 61% rename from src/logging.bif rename to src/logging/logging.bif index 062e4dbe31..87323ef789 100644 --- a/src/logging.bif +++ b/src/logging/logging.bif @@ -3,8 +3,6 @@ module Log; %%{ -#include "NetVar.h" - #include "logging/Manager.h" %%} @@ -65,55 +63,3 @@ function Log::__flush%(id: Log::ID%): bool bool result = log_mgr->Flush(id->AsEnumVal()); return new Val(result, TYPE_BOOL); %} - -# Options for the ASCII writer. - -module LogAscii; - -const output_to_stdout: bool; -const include_meta: bool; -const meta_prefix: string; -const separator: string; -const set_separator: string; -const empty_field: string; -const unset_field: string; -const use_json: bool; -const json_timestamps: JSON::TimestampFormat; - -# Options for the DataSeries writer. - -module LogDataSeries; - -const compression: string; -const extent_size: count; -const dump_schema: bool; -const use_integer_for_time: bool; -const num_threads: count; - -# Options for the SQLite writer - -module LogSQLite; - -const set_separator: string; -const empty_field: string; -const unset_field: string; - -# Options for the ElasticSearch writer. 
- -module LogElasticSearch; - -const cluster_name: string; -const server_host: string; -const server_port: count; -const index_prefix: string; -const type_prefix: string; -const transfer_timeout: interval; -const max_batch_size: count; -const max_batch_interval: interval; -const max_byte_size: count; - -# Options for the None writer. - -module LogNone; - -const debug: bool; diff --git a/src/logging/writers/CMakeLists.txt b/src/logging/writers/CMakeLists.txt new file mode 100644 index 0000000000..867ad58c47 --- /dev/null +++ b/src/logging/writers/CMakeLists.txt @@ -0,0 +1,4 @@ + +add_subdirectory(ascii) +add_subdirectory(none) +add_subdirectory(sqlite) diff --git a/src/logging/writers/DataSeries.cc b/src/logging/writers/DataSeries.cc deleted file mode 100644 index 2c14a51e25..0000000000 --- a/src/logging/writers/DataSeries.cc +++ /dev/null @@ -1,462 +0,0 @@ -// See the file "COPYING" in the main distribution directory for copyright. - -#include "config.h" - -#ifdef USE_DATASERIES - -#include -#include -#include - -#include - -#include "NetVar.h" -#include "threading/SerialTypes.h" - -#include "DataSeries.h" - -using namespace logging; -using namespace writer; - -std::string DataSeries::LogValueToString(threading::Value *val) - { - // In some cases, no value is attached. If this is the case, return - // an empty string. - if( ! val->present ) - return ""; - - switch(val->type) { - case TYPE_BOOL: - return (val->val.int_val ? "true" : "false"); - - case TYPE_INT: - { - std::ostringstream ostr; - ostr << val->val.int_val; - return ostr.str(); - } - - case TYPE_COUNT: - case TYPE_COUNTER: - case TYPE_PORT: - { - std::ostringstream ostr; - ostr << val->val.uint_val; - return ostr.str(); - } - - case TYPE_SUBNET: - return ascii->Render(val->val.subnet_val); - - case TYPE_ADDR: - return ascii->Render(val->val.addr_val); - - // Note: These two cases are relatively special. We need to convert - // these values into their integer equivalents to maximize precision. 
- // At the moment, there won't be a noticeable effect (Bro uses the - // double format everywhere internally, so we've already lost the - // precision we'd gain here), but timestamps may eventually switch to - // this representation within Bro. - // - // In the near-term, this *should* lead to better pack_relative (and - // thus smaller output files). - case TYPE_TIME: - case TYPE_INTERVAL: - if ( ds_use_integer_for_time ) - { - std::ostringstream ostr; - ostr << (uint64_t)(DataSeries::TIME_SCALE * val->val.double_val); - return ostr.str(); - } - else - return ascii->Render(val->val.double_val); - - case TYPE_DOUBLE: - return ascii->Render(val->val.double_val); - - case TYPE_ENUM: - case TYPE_STRING: - case TYPE_FILE: - case TYPE_FUNC: - if ( ! val->val.string_val.length ) - return ""; - - return string(val->val.string_val.data, val->val.string_val.length); - - case TYPE_TABLE: - { - if ( ! val->val.set_val.size ) - return ""; - - string tmpString = ""; - - for ( int j = 0; j < val->val.set_val.size; j++ ) - { - if ( j > 0 ) - tmpString += ds_set_separator; - - tmpString += LogValueToString(val->val.set_val.vals[j]); - } - - return tmpString; - } - - case TYPE_VECTOR: - { - if ( ! val->val.vector_val.size ) - return ""; - - string tmpString = ""; - - for ( int j = 0; j < val->val.vector_val.size; j++ ) - { - if ( j > 0 ) - tmpString += ds_set_separator; - - tmpString += LogValueToString(val->val.vector_val.vals[j]); - } - - return tmpString; - } - - default: - InternalError(Fmt("unknown type %s in DataSeries::LogValueToString", type_name(val->type))); - return "cannot be reached"; - } -} - -string DataSeries::GetDSFieldType(const threading::Field *field) -{ - switch(field->type) { - case TYPE_BOOL: - return "bool"; - - case TYPE_COUNT: - case TYPE_COUNTER: - case TYPE_PORT: - case TYPE_INT: - return "int64"; - - case TYPE_DOUBLE: - return "double"; - - case TYPE_TIME: - case TYPE_INTERVAL: - return ds_use_integer_for_time ? 
"int64" : "double"; - - case TYPE_SUBNET: - case TYPE_ADDR: - case TYPE_ENUM: - case TYPE_STRING: - case TYPE_FILE: - case TYPE_TABLE: - case TYPE_VECTOR: - case TYPE_FUNC: - return "variable32"; - - default: - InternalError(Fmt("unknown type %s in DataSeries::GetDSFieldType", type_name(field->type))); - return "cannot be reached"; - } -} - -string DataSeries::BuildDSSchemaFromFieldTypes(const vector& vals, string sTitle) - { - if( ! sTitle.size() ) - sTitle = "GenericBroStream"; - - string xmlschema = "\n"; - - for( size_t i = 0; i < vals.size(); ++i ) - { - xmlschema += "\t\n"; - } - - xmlschema += "\n"; - - for( size_t i = 0; i < vals.size(); ++i ) - { - xmlschema += "\n"; - } - - return xmlschema; -} - -std::string DataSeries::GetDSOptionsForType(const threading::Field *field) -{ - switch( field->type ) { - case TYPE_TIME: - case TYPE_INTERVAL: - { - std::string s; - s += "pack_relative=\"" + std::string(field->name) + "\""; - - if ( ! ds_use_integer_for_time ) - s += " pack_scale=\"1e-6\" print_format=\"%.6f\" pack_scale_warn=\"no\""; - else - s += string(" units=\"") + TIME_UNIT() + "\" epoch=\"unix\""; - - return s; - } - - case TYPE_SUBNET: - case TYPE_ADDR: - case TYPE_ENUM: - case TYPE_STRING: - case TYPE_FILE: - case TYPE_TABLE: - case TYPE_VECTOR: - return "pack_unique=\"yes\""; - - default: - return ""; - } -} - -DataSeries::DataSeries(WriterFrontend* frontend) : WriterBackend(frontend) -{ - ds_compression = string((const char *)BifConst::LogDataSeries::compression->Bytes(), - BifConst::LogDataSeries::compression->Len()); - ds_dump_schema = BifConst::LogDataSeries::dump_schema; - ds_extent_size = BifConst::LogDataSeries::extent_size; - ds_num_threads = BifConst::LogDataSeries::num_threads; - ds_use_integer_for_time = BifConst::LogDataSeries::use_integer_for_time; - ds_set_separator = ","; - - threading::formatter::Ascii::SeparatorInfo sep_info; - ascii = new threading::formatter::Ascii(this, sep_info); - - compress_type = Extent::compress_mode_none; - 
log_file = 0; - log_output = 0; -} - -DataSeries::~DataSeries() - { - delete ascii; - } - -bool DataSeries::OpenLog(string path) - { - log_file = new DataSeriesSink(path + ".ds", compress_type); - log_file->writeExtentLibrary(log_types); - - for( size_t i = 0; i < schema_list.size(); ++i ) - { - string fn = schema_list[i].field_name; - GeneralField* gf = 0; -#ifdef USE_PERFTOOLS_DEBUG - { - // GeneralField isn't cleaning up some results of xml parsing, reported - // here: https://github.com/dataseries/DataSeries/issues/1 - // Ignore for now to make leak tests pass. There's confidence that - // we do clean up the GeneralField* since the ExtentSeries dtor for - // member log_series would trigger an assert if dynamically allocated - // fields aren't deleted beforehand. - HeapLeakChecker::Disabler disabler; -#endif - gf = GeneralField::create(log_series, fn); -#ifdef USE_PERFTOOLS_DEBUG - } -#endif - extents.insert(std::make_pair(fn, gf)); - } - - if ( ds_extent_size < ROW_MIN ) - { - Warning(Fmt("%d is not a valid value for 'rows'. Using min of %d instead", (int)ds_extent_size, (int)ROW_MIN)); - ds_extent_size = ROW_MIN; - } - - else if( ds_extent_size > ROW_MAX ) - { - Warning(Fmt("%d is not a valid value for 'rows'. Using max of %d instead", (int)ds_extent_size, (int)ROW_MAX)); - ds_extent_size = ROW_MAX; - } - - log_output = new OutputModule(*log_file, log_series, log_type, ds_extent_size); - - return true; - } - -bool DataSeries::DoInit(const WriterInfo& info, int num_fields, const threading::Field* const * fields) - { - // We first construct an XML schema thing (and, if ds_dump_schema is - // set, dump it to path + ".ds.xml"). Assuming that goes well, we - // use that schema to build our output logfile and prepare it to be - // written to. - - // Note: compressor count must be set *BEFORE* DataSeriesSink is - // instantiated. - if( ds_num_threads < THREAD_MIN && ds_num_threads != 0 ) - { - Warning(Fmt("%d is too few threads! 
Using %d instead", (int)ds_num_threads, (int)THREAD_MIN)); - ds_num_threads = THREAD_MIN; - } - - if( ds_num_threads > THREAD_MAX ) - { - Warning(Fmt("%d is too many threads! Dropping back to %d", (int)ds_num_threads, (int)THREAD_MAX)); - ds_num_threads = THREAD_MAX; - } - - if( ds_num_threads > 0 ) - DataSeriesSink::setCompressorCount(ds_num_threads); - - for ( int i = 0; i < num_fields; i++ ) - { - const threading::Field* field = fields[i]; - SchemaValue val; - val.ds_type = GetDSFieldType(field); - val.field_name = string(field->name); - val.field_options = GetDSOptionsForType(field); - val.bro_type = field->TypeName(); - schema_list.push_back(val); - } - - string schema = BuildDSSchemaFromFieldTypes(schema_list, info.path); - - if( ds_dump_schema ) - { - string name = string(info.path) + ".ds.xml"; - FILE* pFile = fopen(name.c_str(), "wb" ); - - if( pFile ) - { - fwrite(schema.c_str(), 1, schema.length(), pFile); - fclose(pFile); - } - - else - Error(Fmt("cannot dump schema: %s", Strerror(errno))); - } - - compress_type = Extent::compress_all; - - if( ds_compression == "lzf" ) - compress_type = Extent::compress_mode_lzf; - - else if( ds_compression == "lzo" ) - compress_type = Extent::compress_mode_lzo; - - else if( ds_compression == "zlib" ) - compress_type = Extent::compress_mode_zlib; - - else if( ds_compression == "bz2" ) - compress_type = Extent::compress_mode_bz2; - - else if( ds_compression == "none" ) - compress_type = Extent::compress_mode_none; - - else if( ds_compression == "any" ) - compress_type = Extent::compress_all; - - else - Warning(Fmt("%s is not a valid compression type. Valid types are: 'lzf', 'lzo', 'zlib', 'bz2', 'none', 'any'. Defaulting to 'any'", ds_compression.c_str())); - - log_type = log_types.registerTypePtr(schema); - log_series.setType(log_type); - - return OpenLog(info.path); - } - -bool DataSeries::DoFlush(double network_time) -{ - // Flushing is handled by DataSeries automatically, so this function - // doesn't do anything. 
- return true; -} - -void DataSeries::CloseLog() - { - for( ExtentIterator iter = extents.begin(); iter != extents.end(); ++iter ) - delete iter->second; - - extents.clear(); - - // Don't delete the file before you delete the output, or bad things - // will happen. - delete log_output; - delete log_file; - - log_output = 0; - log_file = 0; - } - -bool DataSeries::DoFinish(double network_time) -{ - CloseLog(); - return true; -} - -bool DataSeries::DoWrite(int num_fields, const threading::Field* const * fields, - threading::Value** vals) -{ - log_output->newRecord(); - - for( size_t i = 0; i < (size_t)num_fields; ++i ) - { - ExtentIterator iter = extents.find(fields[i]->name); - assert(iter != extents.end()); - - if( iter != extents.end() ) - { - GeneralField *cField = iter->second; - - if( vals[i]->present ) - cField->set(LogValueToString(vals[i])); - } - } - - return true; -} - -bool DataSeries::DoRotate(const char* rotated_path, double open, double close, bool terminating) -{ - // Note that if DS files are rotated too often, the aggregate log - // size will be (much) larger. - CloseLog(); - - string dsname = string(Info().path) + ".ds"; - string nname = string(rotated_path) + ".ds"; - - if ( rename(dsname.c_str(), nname.c_str()) != 0 ) - { - char buf[256]; - strerror_r(errno, buf, sizeof(buf)); - Error(Fmt("failed to rename %s to %s: %s", dsname.c_str(), - nname.c_str(), buf)); - FinishedRotation(); - return false; - } - - if ( ! FinishedRotation(nname.c_str(), dsname.c_str(), open, close, terminating) ) - { - Error(Fmt("error rotating %s to %s", dsname.c_str(), nname.c_str())); - return false; - } - - return OpenLog(Info().path); -} - -bool DataSeries::DoSetBuf(bool enabled) -{ - // DataSeries is *always* buffered to some degree. This option is ignored. 
- return true; -} - -bool DataSeries::DoHeartbeat(double network_time, double current_time) -{ - return true; -} - -#endif /* USE_DATASERIES */ diff --git a/src/logging/writers/DataSeries.h b/src/logging/writers/DataSeries.h deleted file mode 100644 index fe095bcb37..0000000000 --- a/src/logging/writers/DataSeries.h +++ /dev/null @@ -1,128 +0,0 @@ -// See the file "COPYING" in the main distribution directory for copyright. -// -// A binary log writer producing DataSeries output. See doc/data-series.rst -// for more information. - -#ifndef LOGGING_WRITER_DATA_SERIES_H -#define LOGGING_WRITER_DATA_SERIES_H - -#include -#include -#include -#include - -#include "../WriterBackend.h" -#include "threading/formatters/Ascii.h" - -namespace logging { namespace writer { - -class DataSeries : public WriterBackend { -public: - DataSeries(WriterFrontend* frontend); - ~DataSeries(); - - static WriterBackend* Instantiate(WriterFrontend* frontend) - { return new DataSeries(frontend); } - -protected: - // Overidden from WriterBackend. - - virtual bool DoInit(const WriterInfo& info, int num_fields, - const threading::Field* const * fields); - - virtual bool DoWrite(int num_fields, const threading::Field* const* fields, - threading::Value** vals); - virtual bool DoSetBuf(bool enabled); - virtual bool DoRotate(const char* rotated_path, double open, - double close, bool terminating); - virtual bool DoFlush(double network_time); - virtual bool DoFinish(double network_time); - virtual bool DoHeartbeat(double network_time, double current_time); - -private: - static const size_t ROW_MIN = 2048; // Minimum extent size. - static const size_t ROW_MAX = (1024 * 1024 * 100); // Maximum extent size. - static const size_t THREAD_MIN = 1; // Minimum number of compression threads that DataSeries may spawn. - static const size_t THREAD_MAX = 128; // Maximum number of compression threads that DataSeries may spawn. 
- static const size_t TIME_SCALE = 1000000; // Fixed-point multiplier for time values when converted to integers. - const char* TIME_UNIT() { return "microseconds"; } // DS name for time resolution when converted to integers. Must match TIME_SCALE. - - struct SchemaValue - { - string ds_type; - string bro_type; - string field_name; - string field_options; - }; - - /** - * Turns a log value into a std::string. Uses an ostringstream to do the - * heavy lifting, but still need to switch on the type to know which value - * in the union to give to the string string for processing. - * - * @param val The value we wish to convert to a string - * @return the string value of val - */ - std::string LogValueToString(threading::Value *val); - - /** - * Takes a field type and converts it to a relevant DataSeries type. - * - * @param field We extract the type from this and convert it into a relevant DS type. - * @return String representation of type that DataSeries can understand. - */ - string GetDSFieldType(const threading::Field *field); - - /** - * Are there any options we should put into the XML schema? - * - * @param field We extract the type from this and return any options that make sense for that type. - * @return Options that can be added directly to the XML (e.g. "pack_relative=\"yes\"") - */ - std::string GetDSOptionsForType(const threading::Field *field); - - /** - * Takes a list of types, a list of names, and a title, and uses it to construct a valid DataSeries XML schema - * thing, which is then returned as a std::string - * - * @param opts std::vector of strings containing a list of options to be appended to each field (e.g. "pack_relative=yes") - * @param sTitle Name of this schema. Ideally, these schemas would be aggregated and re-used. - */ - string BuildDSSchemaFromFieldTypes(const vector& vals, string sTitle); - - /** Closes the currently open file. */ - void CloseLog(); - - /** Opens a new file. 
*/ - bool OpenLog(string path); - - typedef std::map ExtentMap; - typedef ExtentMap::iterator ExtentIterator; - - // Internal DataSeries structures we need to keep track of. - vector schema_list; - ExtentTypeLibrary log_types; - ExtentType::Ptr log_type; - ExtentSeries log_series; - ExtentMap extents; - int compress_type; - - DataSeriesSink* log_file; - OutputModule* log_output; - - // Options set from the script-level. - uint64 ds_extent_size; - uint64 ds_num_threads; - string ds_compression; - bool ds_dump_schema; - bool ds_use_integer_for_time; - string ds_set_separator; - - threading::formatter::Ascii* ascii; -}; - -} -} - -#endif - diff --git a/src/logging/writers/ElasticSearch.cc b/src/logging/writers/ElasticSearch.cc deleted file mode 100644 index 0dd1e1097c..0000000000 --- a/src/logging/writers/ElasticSearch.cc +++ /dev/null @@ -1,295 +0,0 @@ -// See the file "COPYING" in the main distribution directory for copyright. -// -// This is experimental code that is not yet ready for production usage. 
-// - - -#include "config.h" - -#ifdef USE_ELASTICSEARCH - -#include "util.h" // Needs to come first for stdint.h - -#include -#include - -#include "BroString.h" -#include "NetVar.h" -#include "threading/SerialTypes.h" - -#include -#include - -#include "ElasticSearch.h" - -using namespace logging; -using namespace writer; -using threading::Value; -using threading::Field; - -ElasticSearch::ElasticSearch(WriterFrontend* frontend) : WriterBackend(frontend) - { - cluster_name_len = BifConst::LogElasticSearch::cluster_name->Len(); - cluster_name = new char[cluster_name_len + 1]; - memcpy(cluster_name, BifConst::LogElasticSearch::cluster_name->Bytes(), cluster_name_len); - cluster_name[cluster_name_len] = 0; - - index_prefix = string((const char*) BifConst::LogElasticSearch::index_prefix->Bytes(), BifConst::LogElasticSearch::index_prefix->Len()); - - es_server = string(Fmt("http://%s:%d", BifConst::LogElasticSearch::server_host->Bytes(), - (int) BifConst::LogElasticSearch::server_port)); - bulk_url = string(Fmt("%s/_bulk", es_server.c_str())); - - http_headers = curl_slist_append(NULL, "Content-Type: text/json; charset=utf-8"); - buffer.Clear(); - counter = 0; - current_index = string(); - prev_index = string(); - last_send = current_time(); - failing = false; - - transfer_timeout = static_cast(BifConst::LogElasticSearch::transfer_timeout); - - curl_handle = HTTPSetup(); - - json = new threading::formatter::JSON(this, threading::formatter::JSON::TS_MILLIS); -} - -ElasticSearch::~ElasticSearch() - { - delete [] cluster_name; - delete json; - } - -bool ElasticSearch::DoInit(const WriterInfo& info, int num_fields, const threading::Field* const* fields) - { - return true; - } - -bool ElasticSearch::DoFlush(double network_time) - { - BatchIndex(); - return true; - } - -bool ElasticSearch::DoFinish(double network_time) - { - BatchIndex(); - curl_slist_free_all(http_headers); - curl_easy_cleanup(curl_handle); - return true; - } - -bool ElasticSearch::BatchIndex() - { - 
curl_easy_reset(curl_handle); - curl_easy_setopt(curl_handle, CURLOPT_URL, bulk_url.c_str()); - curl_easy_setopt(curl_handle, CURLOPT_POST, 1); - curl_easy_setopt(curl_handle, CURLOPT_POSTFIELDSIZE_LARGE, (curl_off_t)buffer.Len()); - curl_easy_setopt(curl_handle, CURLOPT_POSTFIELDS, buffer.Bytes()); - failing = ! HTTPSend(curl_handle); - - // We are currently throwing the data out regardless of if the send failed. Fire and forget! - buffer.Clear(); - counter = 0; - last_send = current_time(); - - return true; - } - -bool ElasticSearch::DoWrite(int num_fields, const Field* const * fields, - Value** vals) - { - if ( current_index.empty() ) - UpdateIndex(network_time, Info().rotation_interval, Info().rotation_base); - - // Our action line looks like: - buffer.AddRaw("{\"index\":{\"_index\":\"", 20); - buffer.Add(current_index); - buffer.AddRaw("\",\"_type\":\"", 11); - buffer.Add(Info().path); - buffer.AddRaw("\"}}\n", 4); - - json->Describe(&buffer, num_fields, fields, vals); - - buffer.AddRaw("\n", 1); - - counter++; - if ( counter >= BifConst::LogElasticSearch::max_batch_size || - uint(buffer.Len()) >= BifConst::LogElasticSearch::max_byte_size ) - BatchIndex(); - - return true; - } - -bool ElasticSearch::UpdateIndex(double now, double rinterval, double rbase) - { - if ( rinterval == 0 ) - { - // if logs aren't being rotated, don't use a rotation oriented index name. - current_index = index_prefix; - } - else - { - double nr = calc_next_rotate(now, rinterval, rbase); - double interval_beginning = now - (rinterval - nr); - - struct tm tm; - char buf[128]; - time_t teatime = (time_t)interval_beginning; - localtime_r(&teatime, &tm); - strftime(buf, sizeof(buf), "%Y%m%d%H%M", &tm); - - prev_index = current_index; - current_index = index_prefix + "-" + buf; - - // Send some metadata about this index. 
- buffer.AddRaw("{\"index\":{\"_index\":\"@", 21); - buffer.Add(index_prefix); - buffer.AddRaw("-meta\",\"_type\":\"index\",\"_id\":\"", 30); - buffer.Add(current_index); - buffer.AddRaw("-", 1); - buffer.Add(Info().rotation_base); - buffer.AddRaw("-", 1); - buffer.Add(Info().rotation_interval); - buffer.AddRaw("\"}}\n{\"name\":\"", 13); - buffer.Add(current_index); - buffer.AddRaw("\",\"start\":", 10); - buffer.Add(interval_beginning); - buffer.AddRaw(",\"end\":", 7); - buffer.Add(interval_beginning+rinterval); - buffer.AddRaw("}\n", 2); - } - - //printf("%s - prev:%s current:%s\n", Info().path.c_str(), prev_index.c_str(), current_index.c_str()); - return true; - } - - -bool ElasticSearch::DoRotate(const char* rotated_path, double open, double close, bool terminating) - { - // Update the currently used index to the new rotation interval. - UpdateIndex(close, Info().rotation_interval, Info().rotation_base); - - // Only do this stuff if there was a previous index. - if ( ! prev_index.empty() ) - { - // FIXME: I think this section is taking too long and causing the thread to die. - - // Compress the previous index - //curl_easy_reset(curl_handle); - //curl_easy_setopt(curl_handle, CURLOPT_URL, Fmt("%s/%s/_settings", es_server.c_str(), prev_index.c_str())); - //curl_easy_setopt(curl_handle, CURLOPT_CUSTOMREQUEST, "PUT"); - //curl_easy_setopt(curl_handle, CURLOPT_POSTFIELDS, "{\"index\":{\"store.compress.stored\":\"true\"}}"); - //curl_easy_setopt(curl_handle, CURLOPT_POSTFIELDSIZE_LARGE, (curl_off_t) 42); - //HTTPSend(curl_handle); - - // Optimize the previous index. - // TODO: make this into variables. - //curl_easy_reset(curl_handle); - //curl_easy_setopt(curl_handle, CURLOPT_URL, Fmt("%s/%s/_optimize?max_num_segments=1&wait_for_merge=false", es_server.c_str(), prev_index.c_str())); - //HTTPSend(curl_handle); - } - - if ( ! 
FinishedRotation(current_index.c_str(), prev_index.c_str(), open, close, terminating) ) - Error(Fmt("error rotating %s to %s", prev_index.c_str(), current_index.c_str())); - - return true; - } - -bool ElasticSearch::DoSetBuf(bool enabled) - { - // Nothing to do. - return true; - } - -bool ElasticSearch::DoHeartbeat(double network_time, double current_time) - { - if ( last_send > 0 && buffer.Len() > 0 && - current_time-last_send > BifConst::LogElasticSearch::max_batch_interval ) - { - BatchIndex(); - } - - return true; - } - - -CURL* ElasticSearch::HTTPSetup() - { - CURL* handle = curl_easy_init(); - if ( ! handle ) - { - Error("cURL did not initialize correctly."); - return 0; - } - - return handle; - } - -size_t ElasticSearch::HTTPReceive(void* ptr, int size, int nmemb, void* userdata) - { - //TODO: Do some verification on the result? - return size; - } - -bool ElasticSearch::HTTPSend(CURL *handle) - { - curl_easy_setopt(handle, CURLOPT_HTTPHEADER, http_headers); - curl_easy_setopt(handle, CURLOPT_WRITEFUNCTION, &logging::writer::ElasticSearch::HTTPReceive); // This gets called with the result. - // HTTP 1.1 likes to use chunked encoded transfers, which aren't good for speed. - // The best (only?) way to disable that is to just use HTTP 1.0 - curl_easy_setopt(handle, CURLOPT_HTTP_VERSION, CURL_HTTP_VERSION_1_0); - - // Some timeout options. These will need more attention later. - curl_easy_setopt(handle, CURLOPT_NOSIGNAL, 1); - curl_easy_setopt(handle, CURLOPT_CONNECTTIMEOUT, transfer_timeout); - curl_easy_setopt(handle, CURLOPT_TIMEOUT, transfer_timeout); - curl_easy_setopt(handle, CURLOPT_DNS_CACHE_TIMEOUT, 60*60); - - CURLcode return_code = curl_easy_perform(handle); - - switch ( return_code ) - { - case CURLE_COULDNT_CONNECT: - case CURLE_COULDNT_RESOLVE_HOST: - case CURLE_WRITE_ERROR: - case CURLE_RECV_ERROR: - { - if ( ! failing ) - Error(Fmt("ElasticSearch server may not be accessible.")); - - break; - } - - case CURLE_OPERATION_TIMEDOUT: - { - if ( ! 
failing ) - Warning(Fmt("HTTP operation with elasticsearch server timed out at %" PRIu64 " msecs.", transfer_timeout)); - - break; - } - - case CURLE_OK: - { - long http_code = 0; - curl_easy_getinfo(curl_handle, CURLINFO_RESPONSE_CODE, &http_code); - if ( http_code == 200 ) - // Hopefully everything goes through here. - return true; - else if ( ! failing ) - Error(Fmt("Received a non-successful status code back from ElasticSearch server, check the elasticsearch server log.")); - - break; - } - - default: - { - break; - } - } - // The "successful" return happens above - return false; - } - -#endif diff --git a/src/logging/writers/ElasticSearch.h b/src/logging/writers/ElasticSearch.h deleted file mode 100644 index 283fff2972..0000000000 --- a/src/logging/writers/ElasticSearch.h +++ /dev/null @@ -1,84 +0,0 @@ -// See the file "COPYING" in the main distribution directory for copyright. -// -// Log writer for writing to an ElasticSearch database -// -// This is experimental code that is not yet ready for production usage. -// - -#ifndef LOGGING_WRITER_ELASTICSEARCH_H -#define LOGGING_WRITER_ELASTICSEARCH_H - -#include -#include "threading/formatters/JSON.h" -#include "../WriterBackend.h" - -namespace logging { namespace writer { - -class ElasticSearch : public WriterBackend { -public: - ElasticSearch(WriterFrontend* frontend); - ~ElasticSearch(); - - static WriterBackend* Instantiate(WriterFrontend* frontend) - { return new ElasticSearch(frontend); } - static string LogExt(); - -protected: - // Overidden from WriterBackend. 
- - virtual bool DoInit(const WriterInfo& info, int num_fields, - const threading::Field* const* fields); - - virtual bool DoWrite(int num_fields, const threading::Field* const* fields, - threading::Value** vals); - virtual bool DoSetBuf(bool enabled); - virtual bool DoRotate(const char* rotated_path, double open, - double close, bool terminating); - virtual bool DoFlush(double network_time); - virtual bool DoFinish(double network_time); - virtual bool DoHeartbeat(double network_time, double current_time); - -private: - bool AddFieldToBuffer(ODesc *b, threading::Value* val, const threading::Field* field); - bool AddValueToBuffer(ODesc *b, threading::Value* val); - bool BatchIndex(); - bool SendMappings(); - bool UpdateIndex(double now, double rinterval, double rbase); - - CURL* HTTPSetup(); - size_t HTTPReceive(void* ptr, int size, int nmemb, void* userdata); - bool HTTPSend(CURL *handle); - - // Buffers, etc. - ODesc buffer; - uint64 counter; - double last_send; - string current_index; - string prev_index; - - CURL* curl_handle; - - // From scripts - char* cluster_name; - int cluster_name_len; - - string es_server; - string bulk_url; - - struct curl_slist *http_headers; - - string path; - string index_prefix; - long transfer_timeout; - bool failing; - - uint64 batch_size; - - threading::formatter::JSON* json; -}; - -} -} - - -#endif diff --git a/src/logging/writers/Ascii.cc b/src/logging/writers/ascii/Ascii.cc similarity index 99% rename from src/logging/writers/Ascii.cc rename to src/logging/writers/ascii/Ascii.cc index fe79089b04..a27553916f 100644 --- a/src/logging/writers/Ascii.cc +++ b/src/logging/writers/ascii/Ascii.cc @@ -5,10 +5,10 @@ #include #include -#include "NetVar.h" #include "threading/SerialTypes.h" #include "Ascii.h" +#include "ascii.bif.h" using namespace logging::writer; using namespace threading; diff --git a/src/logging/writers/Ascii.h b/src/logging/writers/ascii/Ascii.h similarity index 98% rename from src/logging/writers/Ascii.h rename to 
src/logging/writers/ascii/Ascii.h index 54402cc141..8648070111 100644 --- a/src/logging/writers/Ascii.h +++ b/src/logging/writers/ascii/Ascii.h @@ -5,7 +5,7 @@ #ifndef LOGGING_WRITER_ASCII_H #define LOGGING_WRITER_ASCII_H -#include "../WriterBackend.h" +#include "logging/WriterBackend.h" #include "threading/formatters/Ascii.h" #include "threading/formatters/JSON.h" @@ -16,9 +16,10 @@ public: Ascii(WriterFrontend* frontend); ~Ascii(); + static string LogExt(); + static WriterBackend* Instantiate(WriterFrontend* frontend) { return new Ascii(frontend); } - static string LogExt(); protected: virtual bool DoInit(const WriterInfo& info, int num_fields, diff --git a/src/logging/writers/ascii/CMakeLists.txt b/src/logging/writers/ascii/CMakeLists.txt new file mode 100644 index 0000000000..0cb0357a0d --- /dev/null +++ b/src/logging/writers/ascii/CMakeLists.txt @@ -0,0 +1,9 @@ + +include(BroPlugin) + +include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) + +bro_plugin_begin(Bro AsciiWriter) +bro_plugin_cc(Ascii.cc Plugin.cc) +bro_plugin_bif(ascii.bif) +bro_plugin_end() diff --git a/src/logging/writers/ascii/Plugin.cc b/src/logging/writers/ascii/Plugin.cc new file mode 100644 index 0000000000..4dcefda47b --- /dev/null +++ b/src/logging/writers/ascii/Plugin.cc @@ -0,0 +1,25 @@ +// See the file in the main distribution directory for copyright. 
+ + +#include "plugin/Plugin.h" + +#include "Ascii.h" + +namespace plugin { +namespace Bro_AsciiWriter { + +class Plugin : public plugin::Plugin { +public: + plugin::Configuration Configure() + { + AddComponent(new ::logging::Component("Ascii", ::logging::writer::Ascii::Instantiate)); + + plugin::Configuration config; + config.name = "Bro::AsciiWriter"; + config.description = "ASCII log writer"; + return config; + } +} plugin; + +} +} diff --git a/src/logging/writers/ascii/ascii.bif b/src/logging/writers/ascii/ascii.bif new file mode 100644 index 0000000000..2817511152 --- /dev/null +++ b/src/logging/writers/ascii/ascii.bif @@ -0,0 +1,14 @@ + +# Options for the ASCII writer. + +module LogAscii; + +const output_to_stdout: bool; +const include_meta: bool; +const meta_prefix: string; +const separator: string; +const set_separator: string; +const empty_field: string; +const unset_field: string; +const use_json: bool; +const json_timestamps: JSON::TimestampFormat; diff --git a/src/logging/writers/none/CMakeLists.txt b/src/logging/writers/none/CMakeLists.txt new file mode 100644 index 0000000000..f6e1265772 --- /dev/null +++ b/src/logging/writers/none/CMakeLists.txt @@ -0,0 +1,9 @@ + +include(BroPlugin) + +include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) + +bro_plugin_begin(Bro NoneWriter) +bro_plugin_cc(None.cc Plugin.cc) +bro_plugin_bif(none.bif) +bro_plugin_end() diff --git a/src/logging/writers/None.cc b/src/logging/writers/none/None.cc similarity index 98% rename from src/logging/writers/None.cc rename to src/logging/writers/none/None.cc index 9b91b82199..0bd507e1f8 100644 --- a/src/logging/writers/None.cc +++ b/src/logging/writers/none/None.cc @@ -2,7 +2,7 @@ #include #include "None.h" -#include "NetVar.h" +#include "none.bif.h" using namespace logging; using namespace writer; diff --git a/src/logging/writers/None.h b/src/logging/writers/none/None.h similarity index 96% rename from src/logging/writers/None.h rename to 
src/logging/writers/none/None.h index 2a6f71a06a..fda9a35330 100644 --- a/src/logging/writers/None.h +++ b/src/logging/writers/none/None.h @@ -5,7 +5,7 @@ #ifndef LOGGING_WRITER_NONE_H #define LOGGING_WRITER_NONE_H -#include "../WriterBackend.h" +#include "logging/WriterBackend.h" namespace logging { namespace writer { diff --git a/src/logging/writers/none/Plugin.cc b/src/logging/writers/none/Plugin.cc new file mode 100644 index 0000000000..f712e7408c --- /dev/null +++ b/src/logging/writers/none/Plugin.cc @@ -0,0 +1,25 @@ +// See the file in the main distribution directory for copyright. + + +#include "plugin/Plugin.h" + +#include "None.h" + +namespace plugin { +namespace Bro_NoneWriter { + +class Plugin : public plugin::Plugin { +public: + plugin::Configuration Configure() + { + AddComponent(new ::logging::Component("None", ::logging::writer::None::Instantiate)); + + plugin::Configuration config; + config.name = "Bro::NoneWriter"; + config.description = "None log writer (primarily for debugging)"; + return config; + } +} plugin; + +} +} diff --git a/src/logging/writers/none/none.bif b/src/logging/writers/none/none.bif new file mode 100644 index 0000000000..2225851c55 --- /dev/null +++ b/src/logging/writers/none/none.bif @@ -0,0 +1,6 @@ + +# Options for the None writer. 
+ +module LogNone; + +const debug: bool; diff --git a/src/logging/writers/sqlite/CMakeLists.txt b/src/logging/writers/sqlite/CMakeLists.txt new file mode 100644 index 0000000000..ce25251679 --- /dev/null +++ b/src/logging/writers/sqlite/CMakeLists.txt @@ -0,0 +1,9 @@ + +include(BroPlugin) + +include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}) + +bro_plugin_begin(Bro SQLiteWriter) +bro_plugin_cc(SQLite.cc Plugin.cc) +bro_plugin_bif(sqlite.bif) +bro_plugin_end() diff --git a/src/logging/writers/sqlite/Plugin.cc b/src/logging/writers/sqlite/Plugin.cc new file mode 100644 index 0000000000..75e6497c99 --- /dev/null +++ b/src/logging/writers/sqlite/Plugin.cc @@ -0,0 +1,25 @@ +// See the file in the main distribution directory for copyright. + + +#include "plugin/Plugin.h" + +#include "SQLite.h" + +namespace plugin { +namespace Bro_SQLiteWriter { + +class Plugin : public plugin::Plugin { +public: + plugin::Configuration Configure() + { + AddComponent(new ::logging::Component("SQLite", ::logging::writer::SQLite::Instantiate)); + + plugin::Configuration config; + config.name = "Bro::SQLiteWriter"; + config.description = "SQLite log writer"; + return config; + } +} plugin; + +} +} diff --git a/src/logging/writers/SQLite.cc b/src/logging/writers/sqlite/SQLite.cc similarity index 99% rename from src/logging/writers/SQLite.cc rename to src/logging/writers/sqlite/SQLite.cc index 44d01ec73f..090810055d 100644 --- a/src/logging/writers/SQLite.cc +++ b/src/logging/writers/sqlite/SQLite.cc @@ -6,10 +6,10 @@ #include #include -#include "../../NetVar.h" -#include "../../threading/SerialTypes.h" +#include "threading/SerialTypes.h" #include "SQLite.h" +#include "sqlite.bif.h" using namespace logging; using namespace writer; diff --git a/src/logging/writers/SQLite.h b/src/logging/writers/sqlite/SQLite.h similarity index 97% rename from src/logging/writers/SQLite.h rename to src/logging/writers/sqlite/SQLite.h index a962e903ff..a820530456 100644 --- 
a/src/logging/writers/SQLite.h +++ b/src/logging/writers/sqlite/SQLite.h @@ -7,8 +7,7 @@ #include "config.h" -#include "../WriterBackend.h" - +#include "logging/WriterBackend.h" #include "threading/formatters/Ascii.h" #include "3rdparty/sqlite3.h" diff --git a/src/logging/writers/sqlite/sqlite.bif b/src/logging/writers/sqlite/sqlite.bif new file mode 100644 index 0000000000..29b93f3a0c --- /dev/null +++ b/src/logging/writers/sqlite/sqlite.bif @@ -0,0 +1,9 @@ + +# Options for the SQLite writer + +module LogSQLite; + +const set_separator: string; +const empty_field: string; +const unset_field: string; + diff --git a/src/main.cc b/src/main.cc index 54eff8fa92..63949c5093 100644 --- a/src/main.cc +++ b/src/main.cc @@ -12,10 +12,6 @@ #include #endif -#ifdef USE_CURL -#include -#endif - #ifdef USE_IDMEF extern "C" { #include @@ -54,13 +50,14 @@ extern "C" void OPENSSL_add_all_algorithms_conf(void); #include "threading/Manager.h" #include "input/Manager.h" #include "logging/Manager.h" -#include "logging/writers/Ascii.h" -#include "input/readers/Raw.h" +#include "logging/writers/ascii/Ascii.h" +#include "input/readers/raw/Raw.h" #include "analyzer/Manager.h" #include "analyzer/Tag.h" #include "plugin/Manager.h" #include "file_analysis/Manager.h" #include "broxygen/Manager.h" +#include "iosource/Manager.h" #include "binpac_bro.h" @@ -96,6 +93,7 @@ plugin::Manager* plugin_mgr = 0; analyzer::Manager* analyzer_mgr = 0; file_analysis::Manager* file_mgr = 0; broxygen::Manager* broxygen_mgr = 0; +iosource::Manager* iosource_mgr = 0; Stmt* stmts; EventHandlerPtr net_done = 0; RuleMatcher* rule_matcher = 0; @@ -112,7 +110,6 @@ int signal_val = 0; int optimize = 0; int do_notice_analysis = 0; int rule_bench = 0; -SecondaryPath* secondary_path = 0; extern char version[]; char* command_line_policy = 0; vector params; @@ -227,25 +224,6 @@ void usage() fprintf(stderr, " $BRO_PROFILER_FILE | Output file for script execution statistics (not set)\n"); fprintf(stderr, " 
$BRO_DISABLE_BROXYGEN | Disable Broxygen documentation support (%s)\n", getenv("BRO_DISABLE_BROXYGEN") ? "set" : "not set"); - fprintf(stderr, "\n"); - fprintf(stderr, " Supported log formats: "); - - bool first = true; - list fmts = logging::Manager::SupportedFormats(); - - for ( list::const_iterator i = fmts.begin(); i != fmts.end(); ++i ) - { - if ( *i == "None" ) - // Skip, it's uninteresting. - continue; - - if ( ! first ) - fprintf(stderr, ","); - - fprintf(stderr, "%s", (*i).c_str()); - first = false; - } - fprintf(stderr, "\n"); exit(1); @@ -363,6 +341,10 @@ void terminate_bro() terminating = true; + // File analysis termination may produce events, so do it early on in + // the termination process. + file_mgr->Terminate(); + brofiler.WriteStats(); EventHandlerPtr bro_done = internal_handler("bro_done"); @@ -387,7 +369,6 @@ void terminate_bro() mgr.Drain(); - file_mgr->Terminate(); log_mgr->Terminate(); input_mgr->Terminate(); thread_mgr->Terminate(); @@ -398,20 +379,16 @@ void terminate_bro() delete broxygen_mgr; delete timer_mgr; - delete dns_mgr; delete persistence_serializer; - delete event_player; delete event_serializer; delete state_serializer; delete event_registry; - delete secondary_path; - delete remote_serializer; delete analyzer_mgr; delete file_mgr; delete log_mgr; delete plugin_mgr; - delete thread_mgr; delete reporter; + delete iosource_mgr; reporter = 0; } @@ -471,8 +448,6 @@ int main(int argc, char** argv) name_list interfaces; name_list read_files; - name_list netflows; - name_list flow_files; name_list rule_files; char* bst_file = 0; char* id_name = 0; @@ -574,7 +549,7 @@ int main(int argc, char** argv) opterr = 0; char opts[256]; - safe_strncpy(opts, "B:D:e:f:I:i:K:l:n:p:R:r:s:T:t:U:w:x:X:y:Y:z:CFGLNOPSWabdghvZQ", + safe_strncpy(opts, "B:D:e:f:I:i:K:l:n:p:R:r:s:T:t:U:w:x:X:z:CFGLNOPSWabdghvZQ", sizeof(opts)); #ifdef USE_PERFTOOLS_DEBUG @@ -634,10 +609,6 @@ int main(int argc, char** argv) writefile = optarg; break; - case 'y': - 
flow_files.append(optarg); - break; - case 'z': if ( streq(optarg, "notice") ) do_notice_analysis = 1; @@ -731,10 +702,6 @@ int main(int argc, char** argv) do_watchdog = 1; break; - case 'Y': - netflows.append(optarg); - break; - case 'h': usage(); break; @@ -813,10 +780,6 @@ int main(int argc, char** argv) SSL_library_init(); SSL_load_error_strings(); -#ifdef USE_CURL - curl_global_init(CURL_GLOBAL_ALL); -#endif - int r = sqlite3_initialize(); if ( r != SQLITE_OK ) @@ -826,8 +789,7 @@ int main(int argc, char** argv) // seed the PRNG. We should do this here (but at least Linux, FreeBSD // and Solaris provide /dev/urandom). - if ( (interfaces.length() > 0 || netflows.length() > 0) && - (read_files.length() > 0 || flow_files.length() > 0 )) + if ( interfaces.length() > 0 && read_files.length() > 0 ) usage(); #ifdef USE_IDMEF @@ -850,7 +812,7 @@ int main(int argc, char** argv) plugin_mgr->SearchDynamicPlugins(bro_plugin_path()); if ( optind == argc && - read_files.length() == 0 && flow_files.length() == 0 && + read_files.length() == 0 && interfaces.length() == 0 && ! (id_name || bst_file) && ! command_line_policy && ! print_plugins ) add_input_file("-"); @@ -877,6 +839,7 @@ int main(int argc, char** argv) // policy, but we can't parse policy without DNS resolution. dns_mgr->SetDir(".state"); + iosource_mgr = new iosource::Manager(); persistence_serializer = new PersistenceSerializer(); remote_serializer = new RemoteSerializer(); event_registry = new EventRegistry(); @@ -890,9 +853,17 @@ int main(int argc, char** argv) file_mgr->InitPreScript(); broxygen_mgr->InitPreScript(); + bool missing_plugin = false; + for ( set::const_iterator i = requested_plugins.begin(); i != requested_plugins.end(); i++ ) - plugin_mgr->ActivateDynamicPlugin(*i); + { + if ( ! plugin_mgr->ActivateDynamicPlugin(*i) ) + missing_plugin = true; + } + + if ( missing_plugin ) + reporter->FatalError("Failed to activate requested dynamic plugin(s)."); plugin_mgr->ActivateDynamicPlugins(! 
bare_mode); @@ -901,8 +872,6 @@ int main(int argc, char** argv) init_event_handlers(); - input::reader::Raw::ClassInit(); - md5_type = new OpaqueType("md5"); sha1_type = new OpaqueType("sha1"); sha256_type = new OpaqueType("sha256"); @@ -945,6 +914,7 @@ int main(int argc, char** argv) analyzer_mgr->InitPostScript(); file_mgr->InitPostScript(); + dns_mgr->InitPostScript(); if ( parse_only ) { @@ -1010,8 +980,7 @@ int main(int argc, char** argv) // ### Add support for debug command file. dbg_init_debugger(0); - if ( (flow_files.length() == 0 || read_files.length() == 0) && - (netflows.length() == 0 || interfaces.length() == 0) ) + if ( read_files.length() == 0 && interfaces.length() == 0 ) { Val* interfaces_val = internal_val("interfaces"); if ( interfaces_val ) @@ -1028,13 +997,8 @@ int main(int argc, char** argv) snaplen = internal_val("snaplen")->AsCount(); - // Initialize the secondary path, if it's needed. - secondary_path = new SecondaryPath(); - if ( dns_type != DNS_PRIME ) - net_init(interfaces, read_files, netflows, flow_files, - writefile, "", - secondary_path->Filter(), do_watchdog); + net_init(interfaces, read_files, writefile, do_watchdog); BroFile::SetDefaultRotation(log_rotate_interval, log_max_size); @@ -1193,9 +1157,9 @@ int main(int argc, char** argv) have_pending_timers = ! 
reading_traces && timer_mgr->Size() > 0; - io_sources.Register(thread_mgr, true); + iosource_mgr->Register(thread_mgr, true); - if ( io_sources.Size() > 0 || + if ( iosource_mgr->Size() > 0 || have_pending_timers || BifConst::exit_only_after_terminate ) { @@ -1254,10 +1218,6 @@ int main(int argc, char** argv) done_with_network(); net_delete(); -#ifdef USE_CURL - curl_global_cleanup(); -#endif - terminate_bro(); sqlite3_shutdown(); diff --git a/src/net_util.h b/src/net_util.h index 0f34335267..d68a7110ce 100644 --- a/src/net_util.h +++ b/src/net_util.h @@ -180,8 +180,11 @@ extern uint32 extract_uint32(const u_char* data); inline double ntohd(double d) { return d; } inline double htond(double d) { return d; } + +#ifndef HAVE_BYTEORDER_64 inline uint64 ntohll(uint64 i) { return i; } inline uint64 htonll(uint64 i) { return i; } +#endif #else @@ -207,6 +210,7 @@ inline double ntohd(double d) inline double htond(double d) { return ntohd(d); } +#ifndef HAVE_BYTEORDER_64 inline uint64 ntohll(uint64 i) { u_char c; @@ -224,6 +228,7 @@ inline uint64 ntohll(uint64 i) } inline uint64 htonll(uint64 i) { return ntohll(i); } +#endif #endif diff --git a/src/parse.y b/src/parse.y index 0289184055..83760dbbf0 100644 --- a/src/parse.y +++ b/src/parse.y @@ -127,7 +127,11 @@ static void parser_new_enum (void) { /* Starting a new enum definition. 
*/ assert(cur_enum_type == NULL); - cur_enum_type = new EnumType(cur_decl_type_id->Name()); + + if ( cur_decl_type_id ) + cur_enum_type = new EnumType(cur_decl_type_id->Name()); + else + reporter->FatalError("incorrect syntax for enum type declaration"); } static void parser_redef_enum (ID *id) diff --git a/src/plugin/Component.cc b/src/plugin/Component.cc index 006806d1fb..4ace2f96af 100644 --- a/src/plugin/Component.cc +++ b/src/plugin/Component.cc @@ -50,6 +50,18 @@ void Component::Describe(ODesc* d) const d->Add("File Analyzer"); break; + case component::IOSOURCE: + d->Add("I/O Source"); + break; + + case component::PKTSRC: + d->Add("Packet Source"); + break; + + case component::PKTDUMPER: + d->Add("Packet Dumper"); + break; + default: reporter->InternalWarning("unknown component type in plugin::Component::Describe"); d->Add(""); diff --git a/src/plugin/Component.h b/src/plugin/Component.h index 71547393e8..de24a7dbde 100644 --- a/src/plugin/Component.h +++ b/src/plugin/Component.h @@ -12,20 +12,23 @@ namespace plugin { namespace component { /** - * Component types. + * Component types. */ enum Type { READER, /// An input reader (not currently used). WRITER, /// A logging writer (not currenly used). ANALYZER, /// A protocol analyzer. - FILE_ANALYZER /// A file analyzer. + FILE_ANALYZER, /// A file analyzer. + IOSOURCE, /// An I/O source, excluding packet sources. + PKTSRC, /// A packet source. + PKTDUMPER /// A packet dumper. }; } /** * Base class for plugin components. A component is a specific piece of * functionality that a plugin provides, such as a protocol analyzer or a log - * writer. + * writer. 
*/ class Component { diff --git a/src/plugin/ComponentManager.h b/src/plugin/ComponentManager.h index 25b2a5f977..7337cf069a 100644 --- a/src/plugin/ComponentManager.h +++ b/src/plugin/ComponentManager.h @@ -27,13 +27,16 @@ class ComponentManager { public: /** - * Constructor creates a new enum type called a "Tag" to associate with + * Constructor creates a new enum type to associate with * a component. * - * @param module The script-layer module in which to install the "Tag" ID + * @param module The script-layer module in which to install the ID * representing an enum type. + * + * @param local_id The local part of the ID of the new enum type + * (e.g., "Tag"). */ - ComponentManager(const string& module); + ComponentManager(const string& module, const string& local_id); /** * @return The script-layer module in which the component's "Tag" ID lives. @@ -125,11 +128,11 @@ private: }; template -ComponentManager::ComponentManager(const string& arg_module) +ComponentManager::ComponentManager(const string& arg_module, const string& local_id) : module(arg_module) { - tag_enum_type = new EnumType(module + "::Tag"); - ::ID* id = install_ID("Tag", module.c_str(), true, true); + tag_enum_type = new EnumType(module + "::" + local_id); + ::ID* id = install_ID(local_id.c_str(), module.c_str(), true, true); add_type(id, tag_enum_type, 0); broxygen_mgr->Identifier(id); } diff --git a/src/plugin/Plugin.h b/src/plugin/Plugin.h index 978e22b634..ccda20054c 100644 --- a/src/plugin/Plugin.h +++ b/src/plugin/Plugin.h @@ -9,6 +9,7 @@ #include "config.h" #include "analyzer/Component.h" #include "file_analysis/Component.h" +#include "iosource/Component.h" // We allow to override this externally for testing purposes. 
#ifndef BRO_PLUGIN_API_VERSION diff --git a/src/strings.bif b/src/strings.bif index f50eb1f89b..4a30ca2aa4 100644 --- a/src/strings.bif +++ b/src/strings.bif @@ -308,7 +308,8 @@ function edit%(arg_s: string, arg_edit_char: string%): string ## ## s: The string to obtain a substring from. ## -## start: The starting position of the substring in *s* +## start: The starting position of the substring in *s*, where 1 is the first +## character. As a special case, 0 also represents the first character. ## ## n: The number of characters to extract, beginning at *start*. ## diff --git a/src/threading/Manager.cc b/src/threading/Manager.cc index 4491cd42b5..449f2a8ad1 100644 --- a/src/threading/Manager.cc +++ b/src/threading/Manager.cc @@ -11,7 +11,7 @@ Manager::Manager() did_process = true; next_beat = 0; terminating = false; - idle = true; + SetIdle(true); } Manager::~Manager() @@ -47,8 +47,8 @@ void Manager::Terminate() all_threads.clear(); msg_threads.clear(); - idle = true; - closed = true; + SetIdle(true); + SetClosed(true); terminating = false; } @@ -56,7 +56,7 @@ void Manager::AddThread(BasicThread* thread) { DBG_LOG(DBG_THREADING, "Adding thread %s ...", thread->Name()); all_threads.push_back(thread); - idle = false; + SetIdle(false); } void Manager::AddMsgThread(MsgThread* thread) @@ -65,7 +65,8 @@ void Manager::AddMsgThread(MsgThread* thread) msg_threads.push_back(thread); } -void Manager::GetFds(int* read, int* write, int* except) +void Manager::GetFds(iosource::FD_Set* read, iosource::FD_Set* write, + iosource::FD_Set* except) { } diff --git a/src/threading/Manager.h b/src/threading/Manager.h index e839749a91..70e592fa10 100644 --- a/src/threading/Manager.h +++ b/src/threading/Manager.h @@ -4,7 +4,7 @@ #include -#include "IOSource.h" +#include "iosource/IOSource.h" #include "BasicThread.h" #include "MsgThread.h" @@ -21,7 +21,7 @@ namespace threading { * their outgoing message queue on a regular basis and feeds data sent into * the rest of Bro. 
It also triggers the regular heartbeats. */ -class Manager : public IOSource +class Manager : public iosource::IOSource { public: /** @@ -103,7 +103,8 @@ protected: /** * Part of the IOSource interface. */ - virtual void GetFds(int* read, int* write, int* except); + virtual void GetFds(iosource::FD_Set* read, iosource::FD_Set* write, + iosource::FD_Set* except); /** * Part of the IOSource interface. diff --git a/src/types.bif b/src/types.bif index a44c3c1615..99df67c9d5 100644 --- a/src/types.bif +++ b/src/types.bif @@ -163,21 +163,6 @@ type ModbusHeaders: record; type ModbusCoils: vector; type ModbusRegisters: vector; -module Log; - -enum Writer %{ - WRITER_DEFAULT, - WRITER_NONE, - WRITER_ASCII, - WRITER_DATASERIES, - WRITER_SQLITE, - WRITER_ELASTICSEARCH, -%} - -enum ID %{ - Unknown, -%} - module Tunnel; enum Type %{ NONE, @@ -191,29 +176,6 @@ enum Type %{ type EncapsulatingConn: record; -module Input; - -enum Reader %{ - READER_DEFAULT, - READER_ASCII, - READER_RAW, - READER_BENCHMARK, - READER_BINARY, - READER_SQLITE, -%} - -enum Event %{ - EVENT_NEW, - EVENT_CHANGED, - EVENT_REMOVED, -%} - -enum Mode %{ - MANUAL = 0, - REREAD = 1, - STREAM = 2, -%} - module GLOBAL; type gtpv1_hdr: record; diff --git a/src/util.cc b/src/util.cc index 15fe198f12..60a92af45f 100644 --- a/src/util.cc +++ b/src/util.cc @@ -43,6 +43,7 @@ #include "NetVar.h" #include "Net.h" #include "Reporter.h" +#include "iosource/Manager.h" /** * Return IP address without enclosing brackets and any leading 0x. @@ -1424,11 +1425,13 @@ double current_time(bool real) double t = double(tv.tv_sec) + double(tv.tv_usec) / 1e6; - if ( ! pseudo_realtime || real || pkt_srcs.length() == 0 ) + const iosource::Manager::PktSrcList& pkt_srcs(iosource_mgr->GetPktSrcs()); + + if ( ! pseudo_realtime || real || pkt_srcs.empty() ) return t; // This obviously only works for a single source ... 
- PktSrc* src = pkt_srcs[0]; + iosource::PktSrc* src = pkt_srcs.front(); if ( net_is_processing_suspended() ) return src->CurrentPacketTimestamp(); diff --git a/testing/btest/Baseline/core.pcap.dumper/output b/testing/btest/Baseline/core.pcap.dumper/output new file mode 100644 index 0000000000..1055e73ebe --- /dev/null +++ b/testing/btest/Baseline/core.pcap.dumper/output @@ -0,0 +1 @@ +00000010 ff ff 00 00 01 00 00 00 1d a2 b2 4e 73 00 07 00 | | 00000010 00 20 00 00 01 00 00 00 1d a2 b2 4e 73 00 07 00 | diff --git a/testing/btest/Baseline/core.pcap.dynamic-filter/conn.log b/testing/btest/Baseline/core.pcap.dynamic-filter/conn.log new file mode 100644 index 0000000000..f42999c4fa --- /dev/null +++ b/testing/btest/Baseline/core.pcap.dynamic-filter/conn.log @@ -0,0 +1,25 @@ +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path conn +#open 2014-08-24-15-51-55 +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes tunnel_parents +#types time string addr port addr port enum string interval count count string bool count string count count count count set[string] +1300475167.096535 CXWv6p3arKYeMETxOg 141.142.220.202 5353 224.0.0.251 5353 udp dns - - - S0 - 0 D 1 73 0 0 (empty) +1300475168.853899 CCvvfg3TEfuqmmG4bh 141.142.220.118 43927 141.142.2.2 53 udp dns 0.000435 38 89 SF - 0 Dd 1 66 1 117 (empty) +1300475168.854378 CsRx2w45OKnoww6xl4 141.142.220.118 37676 141.142.2.2 53 udp dns 0.000420 52 99 SF - 0 Dd 1 80 1 127 (empty) +1300475168.854837 CRJuHdVW0XPVINV8a 141.142.220.118 40526 141.142.2.2 53 udp dns 0.000392 38 183 SF - 0 Dd 1 66 1 211 (empty) +1300475168.857956 CPbrpk1qSsw6ESzHV4 141.142.220.118 32902 141.142.2.2 53 udp dns 0.000317 38 89 SF - 0 Dd 1 66 1 117 (empty) +1300475168.858306 C6pKV8GSxOnSLghOa 141.142.220.118 59816 141.142.2.2 53 udp dns 0.000343 52 99 SF - 0 Dd 1 80 1 127 (empty) 
+1300475168.858713 CIPOse170MGiRM1Qf4 141.142.220.118 59714 141.142.2.2 53 udp dns 0.000375 38 183 SF - 0 Dd 1 66 1 211 (empty) +1300475168.891644 C7XEbhP654jzLoe3a 141.142.220.118 58206 141.142.2.2 53 udp dns 0.000339 38 89 SF - 0 Dd 1 66 1 117 (empty) +1300475168.892037 CJ3xTn1c4Zw9TmAE05 141.142.220.118 38911 141.142.2.2 53 udp dns 0.000335 52 99 SF - 0 Dd 1 80 1 127 (empty) +1300475168.892414 CMXxB5GvmoxJFXdTa 141.142.220.118 59746 141.142.2.2 53 udp dns 0.000421 38 183 SF - 0 Dd 1 66 1 211 (empty) +1300475168.893988 Caby8b1slFea8xwSmb 141.142.220.118 45000 141.142.2.2 53 udp dns 0.000384 38 89 SF - 0 Dd 1 66 1 117 (empty) +1300475168.894422 Che1bq3i2rO3KD1Syg 141.142.220.118 48479 141.142.2.2 53 udp dns 0.000317 52 99 SF - 0 Dd 1 80 1 127 (empty) +1300475168.894787 C3SfNE4BWaU4aSuwkc 141.142.220.118 48128 141.142.2.2 53 udp dns 0.000423 38 183 SF - 0 Dd 1 66 1 211 (empty) +1300475168.901749 CEle3f3zno26fFZkrh 141.142.220.118 56056 141.142.2.2 53 udp dns 0.000402 36 131 SF - 0 Dd 1 64 1 159 (empty) +1300475168.902195 CwSkQu4eWZCH7OONC1 141.142.220.118 55092 141.142.2.2 53 udp dns 0.000374 36 198 SF - 0 Dd 1 64 1 226 (empty) +1300475168.652003 CjhGID4nQcgTWjvg4c 141.142.220.118 35634 208.80.152.2 80 tcp - - - - OTH - 0 D 1 515 0 0 (empty) +#close 2014-08-24-15-51-55 diff --git a/testing/btest/Baseline/core.pcap.dynamic-filter/output b/testing/btest/Baseline/core.pcap.dynamic-filter/output new file mode 100644 index 0000000000..e8be0b0195 --- /dev/null +++ b/testing/btest/Baseline/core.pcap.dynamic-filter/output @@ -0,0 +1,30 @@ +1, [orig_h=141.142.220.202, orig_p=5353/udp, resp_h=224.0.0.251, resp_p=5353/udp] +2, [orig_h=141.142.220.118, orig_p=35634/tcp, resp_h=208.80.152.2, resp_p=80/tcp] +3, [orig_h=141.142.220.118, orig_p=43927/udp, resp_h=141.142.2.2, resp_p=53/udp] +4, [orig_h=141.142.220.118, orig_p=43927/udp, resp_h=141.142.2.2, resp_p=53/udp] +5, [orig_h=141.142.220.118, orig_p=37676/udp, resp_h=141.142.2.2, resp_p=53/udp] +6, [orig_h=141.142.220.118, 
orig_p=37676/udp, resp_h=141.142.2.2, resp_p=53/udp] +7, [orig_h=141.142.220.118, orig_p=40526/udp, resp_h=141.142.2.2, resp_p=53/udp] +8, [orig_h=141.142.220.118, orig_p=40526/udp, resp_h=141.142.2.2, resp_p=53/udp] +9, [orig_h=141.142.220.118, orig_p=32902/udp, resp_h=141.142.2.2, resp_p=53/udp] +10, [orig_h=141.142.220.118, orig_p=32902/udp, resp_h=141.142.2.2, resp_p=53/udp] +11, [orig_h=141.142.220.118, orig_p=59816/udp, resp_h=141.142.2.2, resp_p=53/udp] +12, [orig_h=141.142.220.118, orig_p=59816/udp, resp_h=141.142.2.2, resp_p=53/udp] +13, [orig_h=141.142.220.118, orig_p=59714/udp, resp_h=141.142.2.2, resp_p=53/udp] +14, [orig_h=141.142.220.118, orig_p=59714/udp, resp_h=141.142.2.2, resp_p=53/udp] +15, [orig_h=141.142.220.118, orig_p=58206/udp, resp_h=141.142.2.2, resp_p=53/udp] +16, [orig_h=141.142.220.118, orig_p=58206/udp, resp_h=141.142.2.2, resp_p=53/udp] +17, [orig_h=141.142.220.118, orig_p=38911/udp, resp_h=141.142.2.2, resp_p=53/udp] +18, [orig_h=141.142.220.118, orig_p=38911/udp, resp_h=141.142.2.2, resp_p=53/udp] +19, [orig_h=141.142.220.118, orig_p=59746/udp, resp_h=141.142.2.2, resp_p=53/udp] +20, [orig_h=141.142.220.118, orig_p=59746/udp, resp_h=141.142.2.2, resp_p=53/udp] +21, [orig_h=141.142.220.118, orig_p=45000/udp, resp_h=141.142.2.2, resp_p=53/udp] +22, [orig_h=141.142.220.118, orig_p=45000/udp, resp_h=141.142.2.2, resp_p=53/udp] +23, [orig_h=141.142.220.118, orig_p=48479/udp, resp_h=141.142.2.2, resp_p=53/udp] +24, [orig_h=141.142.220.118, orig_p=48479/udp, resp_h=141.142.2.2, resp_p=53/udp] +25, [orig_h=141.142.220.118, orig_p=48128/udp, resp_h=141.142.2.2, resp_p=53/udp] +26, [orig_h=141.142.220.118, orig_p=48128/udp, resp_h=141.142.2.2, resp_p=53/udp] +27, [orig_h=141.142.220.118, orig_p=56056/udp, resp_h=141.142.2.2, resp_p=53/udp] +28, [orig_h=141.142.220.118, orig_p=56056/udp, resp_h=141.142.2.2, resp_p=53/udp] +29, [orig_h=141.142.220.118, orig_p=55092/udp, resp_h=141.142.2.2, resp_p=53/udp] +30, [orig_h=141.142.220.118, 
orig_p=55092/udp, resp_h=141.142.2.2, resp_p=53/udp] diff --git a/testing/btest/Baseline/core.pcap.filter-error/output b/testing/btest/Baseline/core.pcap.filter-error/output new file mode 100644 index 0000000000..82804bb483 --- /dev/null +++ b/testing/btest/Baseline/core.pcap.filter-error/output @@ -0,0 +1,3 @@ +fatal error in /home/robin/bro/master/scripts/base/frameworks/packet-filter/./main.bro, line 282: Bad pcap filter 'kaputt' +---- +error, cannot compile BPF filter "kaputt, too" diff --git a/testing/btest/Baseline/core.pcap.input-error/output2 b/testing/btest/Baseline/core.pcap.input-error/output2 new file mode 100644 index 0000000000..74666797b9 --- /dev/null +++ b/testing/btest/Baseline/core.pcap.input-error/output2 @@ -0,0 +1,2 @@ +fatal error: problem with interface NO_SUCH_INTERFACE +fatal error: problem with trace file NO_SUCH_TRACE (NO_SUCH_TRACE: No such file or directory) diff --git a/testing/btest/Baseline/core.pcap.pseudo-realtime/output b/testing/btest/Baseline/core.pcap.pseudo-realtime/output new file mode 100644 index 0000000000..d708959ce9 --- /dev/null +++ b/testing/btest/Baseline/core.pcap.pseudo-realtime/output @@ -0,0 +1 @@ +real time matches trace time diff --git a/testing/btest/Baseline/core.pcap.read-trace-with-filter/conn.log b/testing/btest/Baseline/core.pcap.read-trace-with-filter/conn.log new file mode 100644 index 0000000000..8522d69ae6 --- /dev/null +++ b/testing/btest/Baseline/core.pcap.read-trace-with-filter/conn.log @@ -0,0 +1,10 @@ +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path conn +#open 2014-08-23-18-29-48 +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes tunnel_parents +#types time string addr port addr port enum string interval count count string bool count string count count count count set[string] +1300475168.892936 CXWv6p3arKYeMETxOg 
141.142.220.118 50000 208.80.152.3 80 tcp http 0.229603 1148 734 S1 - 0 ShADad 6 1468 4 950 (empty) +#close 2014-08-23-18-29-48 diff --git a/testing/btest/Baseline/core.pcap.read-trace-with-filter/packet_filter.log b/testing/btest/Baseline/core.pcap.read-trace-with-filter/packet_filter.log new file mode 100644 index 0000000000..75b09c608a --- /dev/null +++ b/testing/btest/Baseline/core.pcap.read-trace-with-filter/packet_filter.log @@ -0,0 +1,10 @@ +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path packet_filter +#open 2014-08-23-18-29-48 +#fields ts node filter init success +#types time string string bool bool +1408818588.510297 bro port 50000 T T +#close 2014-08-23-18-29-48 diff --git a/testing/btest/Baseline/coverage.bare-load-baseline/canonified_loaded_scripts.log b/testing/btest/Baseline/coverage.bare-load-baseline/canonified_loaded_scripts.log index 8128554281..1a8685c86a 100644 --- a/testing/btest/Baseline/coverage.bare-load-baseline/canonified_loaded_scripts.log +++ b/testing/btest/Baseline/coverage.bare-load-baseline/canonified_loaded_scripts.log @@ -3,7 +3,7 @@ #empty_field (empty) #unset_field - #path loaded_scripts -#open 2014-05-15-14-10-48 +#open 2014-09-06-01-19-42 #fields name #types string scripts/base/init-bare.bro @@ -14,6 +14,39 @@ scripts/base/init-bare.bro build/scripts/base/bif/reporter.bif.bro build/scripts/base/bif/plugins/Bro_SNMP.types.bif.bro build/scripts/base/bif/event.bif.bro + scripts/base/frameworks/logging/__load__.bro + scripts/base/frameworks/logging/main.bro + build/scripts/base/bif/logging.bif.bro + scripts/base/frameworks/logging/postprocessors/__load__.bro + scripts/base/frameworks/logging/postprocessors/scp.bro + scripts/base/frameworks/logging/postprocessors/sftp.bro + scripts/base/frameworks/logging/writers/ascii.bro + scripts/base/frameworks/logging/writers/sqlite.bro + scripts/base/frameworks/logging/writers/none.bro + scripts/base/frameworks/input/__load__.bro + 
scripts/base/frameworks/input/main.bro + build/scripts/base/bif/input.bif.bro + scripts/base/frameworks/input/readers/ascii.bro + scripts/base/frameworks/input/readers/raw.bro + scripts/base/frameworks/input/readers/benchmark.bro + scripts/base/frameworks/input/readers/binary.bro + scripts/base/frameworks/input/readers/sqlite.bro + scripts/base/frameworks/analyzer/__load__.bro + scripts/base/frameworks/analyzer/main.bro + scripts/base/frameworks/packet-filter/utils.bro + build/scripts/base/bif/analyzer.bif.bro + scripts/base/frameworks/files/__load__.bro + scripts/base/frameworks/files/main.bro + build/scripts/base/bif/file_analysis.bif.bro + scripts/base/utils/site.bro + scripts/base/utils/patterns.bro + scripts/base/frameworks/files/magic/__load__.bro + build/scripts/base/bif/__load__.bro + build/scripts/base/bif/broxygen.bif.bro + build/scripts/base/bif/pcap.bif.bro + build/scripts/base/bif/bloom-filter.bif.bro + build/scripts/base/bif/cardinality-counter.bif.bro + build/scripts/base/bif/top-k.bif.bro build/scripts/base/bif/plugins/__load__.bro build/scripts/base/bif/plugins/Bro_ARP.events.bif.bro build/scripts/base/bif/plugins/Bro_AYIYA.events.bif.bro @@ -71,40 +104,14 @@ scripts/base/init-bare.bro build/scripts/base/bif/plugins/Bro_X509.events.bif.bro build/scripts/base/bif/plugins/Bro_X509.types.bif.bro build/scripts/base/bif/plugins/Bro_X509.functions.bif.bro - scripts/base/frameworks/logging/__load__.bro - scripts/base/frameworks/logging/main.bro - build/scripts/base/bif/logging.bif.bro - scripts/base/frameworks/logging/postprocessors/__load__.bro - scripts/base/frameworks/logging/postprocessors/scp.bro - scripts/base/frameworks/logging/postprocessors/sftp.bro - scripts/base/frameworks/logging/writers/ascii.bro - scripts/base/frameworks/logging/writers/dataseries.bro - scripts/base/frameworks/logging/writers/sqlite.bro - scripts/base/frameworks/logging/writers/elasticsearch.bro - scripts/base/frameworks/logging/writers/none.bro - 
scripts/base/frameworks/input/__load__.bro - scripts/base/frameworks/input/main.bro - build/scripts/base/bif/input.bif.bro - scripts/base/frameworks/input/readers/ascii.bro - scripts/base/frameworks/input/readers/raw.bro - scripts/base/frameworks/input/readers/benchmark.bro - scripts/base/frameworks/input/readers/binary.bro - scripts/base/frameworks/input/readers/sqlite.bro - scripts/base/frameworks/analyzer/__load__.bro - scripts/base/frameworks/analyzer/main.bro - scripts/base/frameworks/packet-filter/utils.bro - build/scripts/base/bif/analyzer.bif.bro - scripts/base/frameworks/files/__load__.bro - scripts/base/frameworks/files/main.bro - build/scripts/base/bif/file_analysis.bif.bro - scripts/base/utils/site.bro - scripts/base/utils/patterns.bro - scripts/base/frameworks/files/magic/__load__.bro - build/scripts/base/bif/__load__.bro - build/scripts/base/bif/bloom-filter.bif.bro - build/scripts/base/bif/cardinality-counter.bif.bro - build/scripts/base/bif/top-k.bif.bro - build/scripts/base/bif/broxygen.bif.bro + build/scripts/base/bif/plugins/Bro_AsciiReader.ascii.bif.bro + build/scripts/base/bif/plugins/Bro_BenchmarkReader.benchmark.bif.bro + build/scripts/base/bif/plugins/Bro_BinaryReader.binary.bif.bro + build/scripts/base/bif/plugins/Bro_RawReader.raw.bif.bro + build/scripts/base/bif/plugins/Bro_SQLiteReader.sqlite.bif.bro + build/scripts/base/bif/plugins/Bro_AsciiWriter.ascii.bif.bro + build/scripts/base/bif/plugins/Bro_NoneWriter.none.bif.bro + build/scripts/base/bif/plugins/Bro_SQLiteWriter.sqlite.bif.bro scripts/policy/misc/loaded-scripts.bro scripts/base/utils/paths.bro -#close 2014-05-15-14-10-48 +#close 2014-09-06-01-19-42 diff --git a/testing/btest/Baseline/coverage.default-load-baseline/canonified_loaded_scripts.log b/testing/btest/Baseline/coverage.default-load-baseline/canonified_loaded_scripts.log index 03c299141c..ebcb980eec 100644 --- a/testing/btest/Baseline/coverage.default-load-baseline/canonified_loaded_scripts.log +++ 
b/testing/btest/Baseline/coverage.default-load-baseline/canonified_loaded_scripts.log @@ -3,7 +3,7 @@ #empty_field (empty) #unset_field - #path loaded_scripts -#open 2014-05-15-14-12-26 +#open 2014-09-06-01-20-32 #fields name #types string scripts/base/init-bare.bro @@ -14,6 +14,39 @@ scripts/base/init-bare.bro build/scripts/base/bif/reporter.bif.bro build/scripts/base/bif/plugins/Bro_SNMP.types.bif.bro build/scripts/base/bif/event.bif.bro + scripts/base/frameworks/logging/__load__.bro + scripts/base/frameworks/logging/main.bro + build/scripts/base/bif/logging.bif.bro + scripts/base/frameworks/logging/postprocessors/__load__.bro + scripts/base/frameworks/logging/postprocessors/scp.bro + scripts/base/frameworks/logging/postprocessors/sftp.bro + scripts/base/frameworks/logging/writers/ascii.bro + scripts/base/frameworks/logging/writers/sqlite.bro + scripts/base/frameworks/logging/writers/none.bro + scripts/base/frameworks/input/__load__.bro + scripts/base/frameworks/input/main.bro + build/scripts/base/bif/input.bif.bro + scripts/base/frameworks/input/readers/ascii.bro + scripts/base/frameworks/input/readers/raw.bro + scripts/base/frameworks/input/readers/benchmark.bro + scripts/base/frameworks/input/readers/binary.bro + scripts/base/frameworks/input/readers/sqlite.bro + scripts/base/frameworks/analyzer/__load__.bro + scripts/base/frameworks/analyzer/main.bro + scripts/base/frameworks/packet-filter/utils.bro + build/scripts/base/bif/analyzer.bif.bro + scripts/base/frameworks/files/__load__.bro + scripts/base/frameworks/files/main.bro + build/scripts/base/bif/file_analysis.bif.bro + scripts/base/utils/site.bro + scripts/base/utils/patterns.bro + scripts/base/frameworks/files/magic/__load__.bro + build/scripts/base/bif/__load__.bro + build/scripts/base/bif/broxygen.bif.bro + build/scripts/base/bif/pcap.bif.bro + build/scripts/base/bif/bloom-filter.bif.bro + build/scripts/base/bif/cardinality-counter.bif.bro + build/scripts/base/bif/top-k.bif.bro 
build/scripts/base/bif/plugins/__load__.bro build/scripts/base/bif/plugins/Bro_ARP.events.bif.bro build/scripts/base/bif/plugins/Bro_AYIYA.events.bif.bro @@ -71,40 +104,14 @@ scripts/base/init-bare.bro build/scripts/base/bif/plugins/Bro_X509.events.bif.bro build/scripts/base/bif/plugins/Bro_X509.types.bif.bro build/scripts/base/bif/plugins/Bro_X509.functions.bif.bro - scripts/base/frameworks/logging/__load__.bro - scripts/base/frameworks/logging/main.bro - build/scripts/base/bif/logging.bif.bro - scripts/base/frameworks/logging/postprocessors/__load__.bro - scripts/base/frameworks/logging/postprocessors/scp.bro - scripts/base/frameworks/logging/postprocessors/sftp.bro - scripts/base/frameworks/logging/writers/ascii.bro - scripts/base/frameworks/logging/writers/dataseries.bro - scripts/base/frameworks/logging/writers/sqlite.bro - scripts/base/frameworks/logging/writers/elasticsearch.bro - scripts/base/frameworks/logging/writers/none.bro - scripts/base/frameworks/input/__load__.bro - scripts/base/frameworks/input/main.bro - build/scripts/base/bif/input.bif.bro - scripts/base/frameworks/input/readers/ascii.bro - scripts/base/frameworks/input/readers/raw.bro - scripts/base/frameworks/input/readers/benchmark.bro - scripts/base/frameworks/input/readers/binary.bro - scripts/base/frameworks/input/readers/sqlite.bro - scripts/base/frameworks/analyzer/__load__.bro - scripts/base/frameworks/analyzer/main.bro - scripts/base/frameworks/packet-filter/utils.bro - build/scripts/base/bif/analyzer.bif.bro - scripts/base/frameworks/files/__load__.bro - scripts/base/frameworks/files/main.bro - build/scripts/base/bif/file_analysis.bif.bro - scripts/base/utils/site.bro - scripts/base/utils/patterns.bro - scripts/base/frameworks/files/magic/__load__.bro - build/scripts/base/bif/__load__.bro - build/scripts/base/bif/bloom-filter.bif.bro - build/scripts/base/bif/cardinality-counter.bif.bro - build/scripts/base/bif/top-k.bif.bro - build/scripts/base/bif/broxygen.bif.bro + 
build/scripts/base/bif/plugins/Bro_AsciiReader.ascii.bif.bro + build/scripts/base/bif/plugins/Bro_BenchmarkReader.benchmark.bif.bro + build/scripts/base/bif/plugins/Bro_BinaryReader.binary.bif.bro + build/scripts/base/bif/plugins/Bro_RawReader.raw.bif.bro + build/scripts/base/bif/plugins/Bro_SQLiteReader.sqlite.bif.bro + build/scripts/base/bif/plugins/Bro_AsciiWriter.ascii.bif.bro + build/scripts/base/bif/plugins/Bro_NoneWriter.none.bif.bro + build/scripts/base/bif/plugins/Bro_SQLiteWriter.sqlite.bif.bro scripts/base/init-default.bro scripts/base/utils/active-http.bro scripts/base/utils/exec.bro @@ -236,4 +243,4 @@ scripts/base/init-default.bro scripts/base/misc/find-checksum-offloading.bro scripts/base/misc/find-filtered-trace.bro scripts/policy/misc/loaded-scripts.bro -#close 2014-05-15-14-12-26 +#close 2014-09-06-01-20-32 diff --git a/testing/btest/Baseline/coverage.find-bro-logs/out b/testing/btest/Baseline/coverage.find-bro-logs/out new file mode 100644 index 0000000000..090a93d655 --- /dev/null +++ b/testing/btest/Baseline/coverage.find-bro-logs/out @@ -0,0 +1,42 @@ +app_stats +barnyard2 +capture_loss +cluster +communication +conn +dhcp +dnp3 +dns +dpd +files +ftp +http +intel +irc +known_certs +known_devices +known_hosts +known_modbus +known_services +loaded_scripts +modbus +modbus_register_change +notice +notice_alarm +packet_filter +radius +reporter +signatures +smtp +snmp +socks +software +ssh +ssl +stats +syslog +traceroute +tunnel +unified2 +weird +x509 diff --git a/testing/btest/Baseline/doc.broxygen.command_line/output b/testing/btest/Baseline/doc.broxygen.command_line/output new file mode 100644 index 0000000000..f599e28b8a --- /dev/null +++ b/testing/btest/Baseline/doc.broxygen.command_line/output @@ -0,0 +1 @@ +10 diff --git a/testing/btest/Baseline/doc.sphinx.data_struct_vector_declaration/btest-doc.sphinx.data_struct_vector_declaration#1 
b/testing/btest/Baseline/doc.sphinx.data_struct_vector_declaration/btest-doc.sphinx.data_struct_vector_declaration#1 index d6e63d72ba..e8bb16ee00 100644 --- a/testing/btest/Baseline/doc.sphinx.data_struct_vector_declaration/btest-doc.sphinx.data_struct_vector_declaration#1 +++ b/testing/btest/Baseline/doc.sphinx.data_struct_vector_declaration/btest-doc.sphinx.data_struct_vector_declaration#1 @@ -7,6 +7,6 @@ # bro data_struct_vector_declaration.bro contents of v1: [1, 2, 3, 4] length of v1: 4 - contents of v1: [1, 2, 3, 4] + contents of v2: [1, 2, 3, 4] length of v2: 4 diff --git a/testing/btest/Baseline/doc.sphinx.data_type_pattern/btest-doc.sphinx.data_type_pattern#1 b/testing/btest/Baseline/doc.sphinx.data_type_pattern/btest-doc.sphinx.data_type_pattern#1 index 99281b205e..a05d4cdabc 100644 --- a/testing/btest/Baseline/doc.sphinx.data_type_pattern/btest-doc.sphinx.data_type_pattern#1 +++ b/testing/btest/Baseline/doc.sphinx.data_type_pattern/btest-doc.sphinx.data_type_pattern#1 @@ -6,6 +6,6 @@ # bro data_type_pattern_01.bro The - brown fox jumped over the + brown fox jumps over the dog. 
diff --git a/testing/btest/Baseline/language.outer_param_binding/out b/testing/btest/Baseline/language.outer_param_binding/out new file mode 100644 index 0000000000..28ad03c85a --- /dev/null +++ b/testing/btest/Baseline/language.outer_param_binding/out @@ -0,0 +1,3 @@ +error in /home/robin/bro/master/testing/btest/.tmp/language.outer_param_binding/outer_param_binding.bro, line 16: referencing outer function IDs not supported (c) +error in /home/robin/bro/master/testing/btest/.tmp/language.outer_param_binding/outer_param_binding.bro, line 16: referencing outer function IDs not supported (d) +error in /home/robin/bro/master/testing/btest/.tmp/language.outer_param_binding/outer_param_binding.bro, line 17: referencing outer function IDs not supported (b) diff --git a/testing/btest/Baseline/plugins.api-version-mismatch/output b/testing/btest/Baseline/plugins.api-version-mismatch/output index 806623cd02..1e4dae5e65 100644 --- a/testing/btest/Baseline/plugins.api-version-mismatch/output +++ b/testing/btest/Baseline/plugins.api-version-mismatch/output @@ -1 +1 @@ -fatal error in /home/robin/bro/master/scripts/base/init-bare.bro, line 1: plugin's API version does not match Bro (expected 2, got 42 in /home/robin/bro/master/testing/btest/.tmp/plugins.api-version-mismatch//lib/Demo-Foo.linux-x86_64.so) +fatal error in /home/robin/bro/master/scripts/base/init-bare.bro, line 1: plugin's API version does not match Bro (expected 2, got 42 in /home/robin/bro/master/testing/btest/.tmp/plugins.api-version-mismatch//lib/XXX) diff --git a/testing/btest/Baseline/plugins.bifs-and-scripts-install/output b/testing/btest/Baseline/plugins.bifs-and-scripts-install/output index a4187d0f7c..f03cfddc81 100644 --- a/testing/btest/Baseline/plugins.bifs-and-scripts-install/output +++ b/testing/btest/Baseline/plugins.bifs-and-scripts-install/output @@ -1,6 +1,6 @@ Demo::Foo - (dynamic, version 1.0) - [Event] plugin_event [Function] hello_plugin_world + [Event] plugin_event plugin: automatically 
loaded at startup calling bif, Hello from the plugin! diff --git a/testing/btest/Baseline/plugins.bifs-and-scripts/output b/testing/btest/Baseline/plugins.bifs-and-scripts/output index a082b3d690..47dd6ed430 100644 --- a/testing/btest/Baseline/plugins.bifs-and-scripts/output +++ b/testing/btest/Baseline/plugins.bifs-and-scripts/output @@ -1,6 +1,6 @@ Demo::Foo - (dynamic, version 1.0) - [Event] plugin_event [Function] hello_plugin_world + [Event] plugin_event === plugin: automatically loaded at startup diff --git a/testing/btest/Baseline/plugins.hooks/output b/testing/btest/Baseline/plugins.hooks/output index 7b0f9262ae..5deb40ca77 100644 --- a/testing/btest/Baseline/plugins.hooks/output +++ b/testing/btest/Baseline/plugins.hooks/output @@ -182,7 +182,7 @@ 0.000000 MetaHookPost CallFunction(Log::__create_stream, (Unified2::LOG, [columns=, ev=Unified2::log_unified2])) -> 0.000000 MetaHookPost CallFunction(Log::__create_stream, (Weird::LOG, [columns=, ev=Weird::log_weird])) -> 0.000000 MetaHookPost CallFunction(Log::__create_stream, (X509::LOG, [columns=, ev=X509::log_x509])) -> -0.000000 MetaHookPost CallFunction(Log::__write, (PacketFilter::LOG, [ts=1405981560.501473, node=bro, filter=ip or not ip, init=T, success=T])) -> +0.000000 MetaHookPost CallFunction(Log::__write, (PacketFilter::LOG, [ts=1412721129.083128, node=bro, filter=ip or not ip, init=T, success=T])) -> 0.000000 MetaHookPost CallFunction(Log::add_default_filter, (Cluster::LOG)) -> 0.000000 MetaHookPost CallFunction(Log::add_default_filter, (Communication::LOG)) -> 0.000000 MetaHookPost CallFunction(Log::add_default_filter, (Conn::LOG)) -> @@ -273,8 +273,8 @@ 0.000000 MetaHookPost CallFunction(Log::create_stream, (Unified2::LOG, [columns=, ev=Unified2::log_unified2])) -> 0.000000 MetaHookPost CallFunction(Log::create_stream, (Weird::LOG, [columns=, ev=Weird::log_weird])) -> 0.000000 MetaHookPost CallFunction(Log::create_stream, (X509::LOG, [columns=, ev=X509::log_x509])) -> -0.000000 MetaHookPost 
CallFunction(Log::default_path_func, (PacketFilter::LOG, , [ts=1405981560.501473, node=bro, filter=ip or not ip, init=T, success=T])) -> -0.000000 MetaHookPost CallFunction(Log::write, (PacketFilter::LOG, [ts=1405981560.501473, node=bro, filter=ip or not ip, init=T, success=T])) -> +0.000000 MetaHookPost CallFunction(Log::default_path_func, (PacketFilter::LOG, , [ts=1412721129.083128, node=bro, filter=ip or not ip, init=T, success=T])) -> +0.000000 MetaHookPost CallFunction(Log::write, (PacketFilter::LOG, [ts=1412721129.083128, node=bro, filter=ip or not ip, init=T, success=T])) -> 0.000000 MetaHookPost CallFunction(Notice::want_pp, ()) -> 0.000000 MetaHookPost CallFunction(PacketFilter::build, ()) -> 0.000000 MetaHookPost CallFunction(PacketFilter::combine_filters, (ip or not ip, and, )) -> @@ -316,7 +316,11 @@ 0.000000 MetaHookPost LoadFile(../main) -> -1 0.000000 MetaHookPost LoadFile(./Bro_ARP.events.bif.bro) -> -1 0.000000 MetaHookPost LoadFile(./Bro_AYIYA.events.bif.bro) -> -1 +0.000000 MetaHookPost LoadFile(./Bro_AsciiReader.ascii.bif.bro) -> -1 +0.000000 MetaHookPost LoadFile(./Bro_AsciiWriter.ascii.bif.bro) -> -1 0.000000 MetaHookPost LoadFile(./Bro_BackDoor.events.bif.bro) -> -1 +0.000000 MetaHookPost LoadFile(./Bro_BenchmarkReader.benchmark.bif.bro) -> -1 +0.000000 MetaHookPost LoadFile(./Bro_BinaryReader.binary.bif.bro) -> -1 0.000000 MetaHookPost LoadFile(./Bro_BitTorrent.events.bif.bro) -> -1 0.000000 MetaHookPost LoadFile(./Bro_ConnSize.events.bif.bro) -> -1 0.000000 MetaHookPost LoadFile(./Bro_DCE_RPC.events.bif.bro) -> -1 @@ -347,16 +351,20 @@ 0.000000 MetaHookPost LoadFile(./Bro_NetBIOS.events.bif.bro) -> -1 0.000000 MetaHookPost LoadFile(./Bro_NetBIOS.functions.bif.bro) -> -1 0.000000 MetaHookPost LoadFile(./Bro_NetFlow.events.bif.bro) -> -1 +0.000000 MetaHookPost LoadFile(./Bro_NoneWriter.none.bif.bro) -> -1 0.000000 MetaHookPost LoadFile(./Bro_PIA.events.bif.bro) -> -1 0.000000 MetaHookPost LoadFile(./Bro_POP3.events.bif.bro) -> -1 0.000000 
MetaHookPost LoadFile(./Bro_RADIUS.events.bif.bro) -> -1 0.000000 MetaHookPost LoadFile(./Bro_RPC.events.bif.bro) -> -1 +0.000000 MetaHookPost LoadFile(./Bro_RawReader.raw.bif.bro) -> -1 0.000000 MetaHookPost LoadFile(./Bro_SMB.events.bif.bro) -> -1 0.000000 MetaHookPost LoadFile(./Bro_SMTP.events.bif.bro) -> -1 0.000000 MetaHookPost LoadFile(./Bro_SMTP.functions.bif.bro) -> -1 0.000000 MetaHookPost LoadFile(./Bro_SNMP.events.bif.bro) -> -1 0.000000 MetaHookPost LoadFile(./Bro_SNMP.types.bif.bro) -> -1 0.000000 MetaHookPost LoadFile(./Bro_SOCKS.events.bif.bro) -> -1 +0.000000 MetaHookPost LoadFile(./Bro_SQLiteReader.sqlite.bif.bro) -> -1 +0.000000 MetaHookPost LoadFile(./Bro_SQLiteWriter.sqlite.bif.bro) -> -1 0.000000 MetaHookPost LoadFile(./Bro_SSH.events.bif.bro) -> -1 0.000000 MetaHookPost LoadFile(./Bro_SSL.events.bif.bro) -> -1 0.000000 MetaHookPost LoadFile(./Bro_SteppingStone.events.bif.bro) -> -1 @@ -380,21 +388,20 @@ 0.000000 MetaHookPost LoadFile(./cardinality-counter.bif.bro) -> -1 0.000000 MetaHookPost LoadFile(./const.bif.bro) -> -1 0.000000 MetaHookPost LoadFile(./consts) -> -1 -0.000000 MetaHookPost LoadFile(./consts.bif.bro) -> -1 0.000000 MetaHookPost LoadFile(./consts.bro) -> -1 0.000000 MetaHookPost LoadFile(./contents) -> -1 0.000000 MetaHookPost LoadFile(./dcc-send) -> -1 0.000000 MetaHookPost LoadFile(./entities) -> -1 0.000000 MetaHookPost LoadFile(./event.bif.bro) -> -1 -0.000000 MetaHookPost LoadFile(./events.bif.bro) -> -1 0.000000 MetaHookPost LoadFile(./exec) -> -1 0.000000 MetaHookPost LoadFile(./file_analysis.bif.bro) -> -1 0.000000 MetaHookPost LoadFile(./files) -> -1 -0.000000 MetaHookPost LoadFile(./functions.bif.bro) -> -1 0.000000 MetaHookPost LoadFile(./gridftp) -> -1 0.000000 MetaHookPost LoadFile(./hll_unique) -> -1 +0.000000 MetaHookPost LoadFile(./hooks.bif.bro) -> -1 0.000000 MetaHookPost LoadFile(./inactivity) -> -1 0.000000 MetaHookPost LoadFile(./info) -> -1 +0.000000 MetaHookPost LoadFile(./init.bro) -> -1 0.000000 
MetaHookPost LoadFile(./input) -> -1 0.000000 MetaHookPost LoadFile(./input.bif.bro) -> -1 0.000000 MetaHookPost LoadFile(./last) -> -1 @@ -408,6 +415,7 @@ 0.000000 MetaHookPost LoadFile(./netstats) -> -1 0.000000 MetaHookPost LoadFile(./non-cluster) -> -1 0.000000 MetaHookPost LoadFile(./patterns) -> -1 +0.000000 MetaHookPost LoadFile(./pcap.bif.bro) -> -1 0.000000 MetaHookPost LoadFile(./plugins) -> -1 0.000000 MetaHookPost LoadFile(./polling) -> -1 0.000000 MetaHookPost LoadFile(./postprocessors) -> -1 @@ -432,9 +440,7 @@ 0.000000 MetaHookPost LoadFile(.<...>/ascii) -> -1 0.000000 MetaHookPost LoadFile(.<...>/benchmark) -> -1 0.000000 MetaHookPost LoadFile(.<...>/binary) -> -1 -0.000000 MetaHookPost LoadFile(.<...>/dataseries) -> -1 0.000000 MetaHookPost LoadFile(.<...>/drop) -> -1 -0.000000 MetaHookPost LoadFile(.<...>/elasticsearch) -> -1 0.000000 MetaHookPost LoadFile(.<...>/email_admin) -> -1 0.000000 MetaHookPost LoadFile(.<...>/hostnames) -> -1 0.000000 MetaHookPost LoadFile(.<...>/none) -> -1 @@ -699,7 +705,7 @@ 0.000000 MetaHookPre CallFunction(Log::__create_stream, (Unified2::LOG, [columns=, ev=Unified2::log_unified2])) 0.000000 MetaHookPre CallFunction(Log::__create_stream, (Weird::LOG, [columns=, ev=Weird::log_weird])) 0.000000 MetaHookPre CallFunction(Log::__create_stream, (X509::LOG, [columns=, ev=X509::log_x509])) -0.000000 MetaHookPre CallFunction(Log::__write, (PacketFilter::LOG, [ts=1405981560.501473, node=bro, filter=ip or not ip, init=T, success=T])) +0.000000 MetaHookPre CallFunction(Log::__write, (PacketFilter::LOG, [ts=1412721129.083128, node=bro, filter=ip or not ip, init=T, success=T])) 0.000000 MetaHookPre CallFunction(Log::add_default_filter, (Cluster::LOG)) 0.000000 MetaHookPre CallFunction(Log::add_default_filter, (Communication::LOG)) 0.000000 MetaHookPre CallFunction(Log::add_default_filter, (Conn::LOG)) @@ -790,8 +796,8 @@ 0.000000 MetaHookPre CallFunction(Log::create_stream, (Unified2::LOG, [columns=, ev=Unified2::log_unified2])) 
0.000000 MetaHookPre CallFunction(Log::create_stream, (Weird::LOG, [columns=, ev=Weird::log_weird])) 0.000000 MetaHookPre CallFunction(Log::create_stream, (X509::LOG, [columns=, ev=X509::log_x509])) -0.000000 MetaHookPre CallFunction(Log::default_path_func, (PacketFilter::LOG, , [ts=1405981560.501473, node=bro, filter=ip or not ip, init=T, success=T])) -0.000000 MetaHookPre CallFunction(Log::write, (PacketFilter::LOG, [ts=1405981560.501473, node=bro, filter=ip or not ip, init=T, success=T])) +0.000000 MetaHookPre CallFunction(Log::default_path_func, (PacketFilter::LOG, , [ts=1412721129.083128, node=bro, filter=ip or not ip, init=T, success=T])) +0.000000 MetaHookPre CallFunction(Log::write, (PacketFilter::LOG, [ts=1412721129.083128, node=bro, filter=ip or not ip, init=T, success=T])) 0.000000 MetaHookPre CallFunction(Notice::want_pp, ()) 0.000000 MetaHookPre CallFunction(PacketFilter::build, ()) 0.000000 MetaHookPre CallFunction(PacketFilter::combine_filters, (ip or not ip, and, )) @@ -833,7 +839,11 @@ 0.000000 MetaHookPre LoadFile(../main) 0.000000 MetaHookPre LoadFile(./Bro_ARP.events.bif.bro) 0.000000 MetaHookPre LoadFile(./Bro_AYIYA.events.bif.bro) +0.000000 MetaHookPre LoadFile(./Bro_AsciiReader.ascii.bif.bro) +0.000000 MetaHookPre LoadFile(./Bro_AsciiWriter.ascii.bif.bro) 0.000000 MetaHookPre LoadFile(./Bro_BackDoor.events.bif.bro) +0.000000 MetaHookPre LoadFile(./Bro_BenchmarkReader.benchmark.bif.bro) +0.000000 MetaHookPre LoadFile(./Bro_BinaryReader.binary.bif.bro) 0.000000 MetaHookPre LoadFile(./Bro_BitTorrent.events.bif.bro) 0.000000 MetaHookPre LoadFile(./Bro_ConnSize.events.bif.bro) 0.000000 MetaHookPre LoadFile(./Bro_DCE_RPC.events.bif.bro) @@ -864,16 +874,20 @@ 0.000000 MetaHookPre LoadFile(./Bro_NetBIOS.events.bif.bro) 0.000000 MetaHookPre LoadFile(./Bro_NetBIOS.functions.bif.bro) 0.000000 MetaHookPre LoadFile(./Bro_NetFlow.events.bif.bro) +0.000000 MetaHookPre LoadFile(./Bro_NoneWriter.none.bif.bro) 0.000000 MetaHookPre 
LoadFile(./Bro_PIA.events.bif.bro) 0.000000 MetaHookPre LoadFile(./Bro_POP3.events.bif.bro) 0.000000 MetaHookPre LoadFile(./Bro_RADIUS.events.bif.bro) 0.000000 MetaHookPre LoadFile(./Bro_RPC.events.bif.bro) +0.000000 MetaHookPre LoadFile(./Bro_RawReader.raw.bif.bro) 0.000000 MetaHookPre LoadFile(./Bro_SMB.events.bif.bro) 0.000000 MetaHookPre LoadFile(./Bro_SMTP.events.bif.bro) 0.000000 MetaHookPre LoadFile(./Bro_SMTP.functions.bif.bro) 0.000000 MetaHookPre LoadFile(./Bro_SNMP.events.bif.bro) 0.000000 MetaHookPre LoadFile(./Bro_SNMP.types.bif.bro) 0.000000 MetaHookPre LoadFile(./Bro_SOCKS.events.bif.bro) +0.000000 MetaHookPre LoadFile(./Bro_SQLiteReader.sqlite.bif.bro) +0.000000 MetaHookPre LoadFile(./Bro_SQLiteWriter.sqlite.bif.bro) 0.000000 MetaHookPre LoadFile(./Bro_SSH.events.bif.bro) 0.000000 MetaHookPre LoadFile(./Bro_SSL.events.bif.bro) 0.000000 MetaHookPre LoadFile(./Bro_SteppingStone.events.bif.bro) @@ -897,21 +911,20 @@ 0.000000 MetaHookPre LoadFile(./cardinality-counter.bif.bro) 0.000000 MetaHookPre LoadFile(./const.bif.bro) 0.000000 MetaHookPre LoadFile(./consts) -0.000000 MetaHookPre LoadFile(./consts.bif.bro) 0.000000 MetaHookPre LoadFile(./consts.bro) 0.000000 MetaHookPre LoadFile(./contents) 0.000000 MetaHookPre LoadFile(./dcc-send) 0.000000 MetaHookPre LoadFile(./entities) 0.000000 MetaHookPre LoadFile(./event.bif.bro) -0.000000 MetaHookPre LoadFile(./events.bif.bro) 0.000000 MetaHookPre LoadFile(./exec) 0.000000 MetaHookPre LoadFile(./file_analysis.bif.bro) 0.000000 MetaHookPre LoadFile(./files) -0.000000 MetaHookPre LoadFile(./functions.bif.bro) 0.000000 MetaHookPre LoadFile(./gridftp) 0.000000 MetaHookPre LoadFile(./hll_unique) +0.000000 MetaHookPre LoadFile(./hooks.bif.bro) 0.000000 MetaHookPre LoadFile(./inactivity) 0.000000 MetaHookPre LoadFile(./info) +0.000000 MetaHookPre LoadFile(./init.bro) 0.000000 MetaHookPre LoadFile(./input) 0.000000 MetaHookPre LoadFile(./input.bif.bro) 0.000000 MetaHookPre LoadFile(./last) @@ -925,6 +938,7 @@ 
0.000000 MetaHookPre LoadFile(./netstats) 0.000000 MetaHookPre LoadFile(./non-cluster) 0.000000 MetaHookPre LoadFile(./patterns) +0.000000 MetaHookPre LoadFile(./pcap.bif.bro) 0.000000 MetaHookPre LoadFile(./plugins) 0.000000 MetaHookPre LoadFile(./polling) 0.000000 MetaHookPre LoadFile(./postprocessors) @@ -949,9 +963,7 @@ 0.000000 MetaHookPre LoadFile(.<...>/ascii) 0.000000 MetaHookPre LoadFile(.<...>/benchmark) 0.000000 MetaHookPre LoadFile(.<...>/binary) -0.000000 MetaHookPre LoadFile(.<...>/dataseries) 0.000000 MetaHookPre LoadFile(.<...>/drop) -0.000000 MetaHookPre LoadFile(.<...>/elasticsearch) 0.000000 MetaHookPre LoadFile(.<...>/email_admin) 0.000000 MetaHookPre LoadFile(.<...>/hostnames) 0.000000 MetaHookPre LoadFile(.<...>/none) @@ -1216,7 +1228,7 @@ 0.000000 | HookCallFunction Log::__create_stream(Unified2::LOG, [columns=, ev=Unified2::log_unified2]) 0.000000 | HookCallFunction Log::__create_stream(Weird::LOG, [columns=, ev=Weird::log_weird]) 0.000000 | HookCallFunction Log::__create_stream(X509::LOG, [columns=, ev=X509::log_x509]) -0.000000 | HookCallFunction Log::__write(PacketFilter::LOG, [ts=1405981560.501473, node=bro, filter=ip or not ip, init=T, success=T]) +0.000000 | HookCallFunction Log::__write(PacketFilter::LOG, [ts=1412721129.083128, node=bro, filter=ip or not ip, init=T, success=T]) 0.000000 | HookCallFunction Log::add_default_filter(Cluster::LOG) 0.000000 | HookCallFunction Log::add_default_filter(Communication::LOG) 0.000000 | HookCallFunction Log::add_default_filter(Conn::LOG) @@ -1307,8 +1319,8 @@ 0.000000 | HookCallFunction Log::create_stream(Unified2::LOG, [columns=, ev=Unified2::log_unified2]) 0.000000 | HookCallFunction Log::create_stream(Weird::LOG, [columns=, ev=Weird::log_weird]) 0.000000 | HookCallFunction Log::create_stream(X509::LOG, [columns=, ev=X509::log_x509]) -0.000000 | HookCallFunction Log::default_path_func(PacketFilter::LOG, , [ts=1405981560.501473, node=bro, filter=ip or not ip, init=T, success=T]) -0.000000 | 
HookCallFunction Log::write(PacketFilter::LOG, [ts=1405981560.501473, node=bro, filter=ip or not ip, init=T, success=T]) +0.000000 | HookCallFunction Log::default_path_func(PacketFilter::LOG, , [ts=1412721129.083128, node=bro, filter=ip or not ip, init=T, success=T]) +0.000000 | HookCallFunction Log::write(PacketFilter::LOG, [ts=1412721129.083128, node=bro, filter=ip or not ip, init=T, success=T]) 0.000000 | HookCallFunction Notice::want_pp() 0.000000 | HookCallFunction PacketFilter::build() 0.000000 | HookCallFunction PacketFilter::combine_filters(ip or not ip, and, ) @@ -1520,10 +1532,20 @@ 1362692527.008509 MetaHookPre UpdateNetworkTime(1362692527.008509) 1362692527.008509 | HookUpdateNetworkTime 1362692527.008509 1362692527.008509 | HookDrainEvents +1362692527.009512 MetaHookPost CallFunction(Files::__add_analyzers_for_mime_type, (FakNcS1Jfe01uljb3, text/plain, [chunk_event=, stream_event=, extract_filename=, extract_limit=0])) -> +1362692527.009512 MetaHookPost CallFunction(Files::add_analyzers_for_mime_type, ([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/plain)) -> +1362692527.009512 MetaHookPost CallFunction(Files::set_info, ([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/plain, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=4705, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=], u2_events=])) -> +1362692527.009512 MetaHookPost CallFunction(Files::set_info, ([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/plain]], info=, u2_events=])) -> 1362692527.009512 MetaHookPost CallFunction(HTTP::code_in_range, (200, 100, 199)) -> +1362692527.009512 MetaHookPost CallFunction(HTTP::get_file_handle, ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, 
status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) -> 1362692527.009512 MetaHookPost CallFunction(HTTP::set_state, ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, F)) -> 1362692527.009512 MetaHookPost CallFunction(HTTP::set_state, ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, F)) -> 1362692527.009512 MetaHookPost CallFunction(HTTP::set_state, ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=1], irc=, 
modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, F)) -> +1362692527.009512 MetaHookPost CallFunction(cat, (Analyzer::ANALYZER_HTTP, 1362692526.869344, F, 1, 1, 141.142.228.5:59856 > 192.150.187.43:80)) -> +1362692527.009512 MetaHookPost CallFunction(file_new, ([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/plain]], info=, u2_events=])) -> +1362692527.009512 MetaHookPost CallFunction(file_over_new_connection, ([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) -> +1362692527.009512 MetaHookPost CallFunction(fmt, (%s:%d > %s:%d, 141.142.228.5, 59856<...>/tcp)) -> +1362692527.009512 MetaHookPost CallFunction(get_file_handle, (Analyzer::ANALYZER_HTTP, [id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) -> 1362692527.009512 MetaHookPost CallFunction(http_begin_entity, ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, 
tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) -> 1362692527.009512 MetaHookPost CallFunction(http_header, ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, ACCEPT-RANGES, bytes)) -> 1362692527.009512 MetaHookPost CallFunction(http_header, ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, CONNECTION, Keep-Alive)) -> @@ -1535,7 +1557,13 @@ 1362692527.009512 MetaHookPost CallFunction(http_header, ([id=[orig_h=141.142.228.5, orig_p=59856<...>/2.4.3 (Fedora))) -> 1362692527.009512 MetaHookPost CallFunction(http_header, ([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain; charset=UTF-8)) -> 1362692527.009512 MetaHookPost CallFunction(http_reply, ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, 
username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], 1.1, 200, OK)) -> +1362692527.009512 MetaHookPost CallFunction(id_string, ([orig_h=141.142.228.5, orig_p=59856<...>/tcp])) -> +1362692527.009512 MetaHookPost CallFunction(set_file_handle, (Analyzer::ANALYZER_HTTP1362692526.869344F11141.142.228.5:59856 > 192.150.187.43:80)) -> +1362692527.009512 MetaHookPost CallFunction(split_all, (HTTP, <...>/)) -> 1362692527.009512 MetaHookPost DrainEvents() -> +1362692527.009512 MetaHookPost QueueEvent(file_new([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/plain]], info=, u2_events=])) -> false +1362692527.009512 MetaHookPost QueueEvent(file_over_new_connection([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) -> false +1362692527.009512 MetaHookPost QueueEvent(get_file_handle(Analyzer::ANALYZER_HTTP, [id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, 
current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) -> false 1362692527.009512 MetaHookPost QueueEvent(http_begin_entity([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) -> false 1362692527.009512 MetaHookPost QueueEvent(http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, ACCEPT-RANGES, bytes)) -> false 1362692527.009512 MetaHookPost QueueEvent(http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, CONNECTION, Keep-Alive)) -> false @@ -1548,10 +1576,20 @@ 1362692527.009512 MetaHookPost QueueEvent(http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain; charset=UTF-8)) -> false 1362692527.009512 
MetaHookPost QueueEvent(http_reply([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], 1.1, 200, OK)) -> false 1362692527.009512 MetaHookPost UpdateNetworkTime(1362692527.009512) -> +1362692527.009512 MetaHookPre CallFunction(Files::__add_analyzers_for_mime_type, (FakNcS1Jfe01uljb3, text/plain, [chunk_event=, stream_event=, extract_filename=, extract_limit=0])) +1362692527.009512 MetaHookPre CallFunction(Files::add_analyzers_for_mime_type, ([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/plain)) +1362692527.009512 MetaHookPre CallFunction(Files::set_info, ([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/plain, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=4705, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=], u2_events=])) +1362692527.009512 MetaHookPre CallFunction(Files::set_info, ([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/plain]], info=, u2_events=])) 1362692527.009512 MetaHookPre CallFunction(HTTP::code_in_range, (200, 100, 199)) +1362692527.009512 MetaHookPre CallFunction(HTTP::get_file_handle, ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, 
resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) 1362692527.009512 MetaHookPre CallFunction(HTTP::set_state, ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, F)) 1362692527.009512 MetaHookPre CallFunction(HTTP::set_state, ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, F)) 1362692527.009512 MetaHookPre CallFunction(HTTP::set_state, ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, F)) +1362692527.009512 MetaHookPre CallFunction(cat, (Analyzer::ANALYZER_HTTP, 1362692526.869344, F, 1, 1, 
141.142.228.5:59856 > 192.150.187.43:80)) +1362692527.009512 MetaHookPre CallFunction(file_new, ([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/plain]], info=, u2_events=])) +1362692527.009512 MetaHookPre CallFunction(file_over_new_connection, ([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) +1362692527.009512 MetaHookPre CallFunction(fmt, (%s:%d > %s:%d, 141.142.228.5, 59856<...>/tcp)) +1362692527.009512 MetaHookPre CallFunction(get_file_handle, (Analyzer::ANALYZER_HTTP, [id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) 1362692527.009512 MetaHookPre CallFunction(http_begin_entity, ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, 
current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) 1362692527.009512 MetaHookPre CallFunction(http_header, ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, ACCEPT-RANGES, bytes)) 1362692527.009512 MetaHookPre CallFunction(http_header, ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, CONNECTION, Keep-Alive)) @@ -1563,7 +1601,13 @@ 1362692527.009512 MetaHookPre CallFunction(http_header, ([id=[orig_h=141.142.228.5, orig_p=59856<...>/2.4.3 (Fedora))) 1362692527.009512 MetaHookPre CallFunction(http_header, ([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain; charset=UTF-8)) 1362692527.009512 MetaHookPre CallFunction(http_reply, ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, 
current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], 1.1, 200, OK)) +1362692527.009512 MetaHookPre CallFunction(id_string, ([orig_h=141.142.228.5, orig_p=59856<...>/tcp])) +1362692527.009512 MetaHookPre CallFunction(set_file_handle, (Analyzer::ANALYZER_HTTP1362692526.869344F11141.142.228.5:59856 > 192.150.187.43:80)) +1362692527.009512 MetaHookPre CallFunction(split_all, (HTTP, <...>/)) 1362692527.009512 MetaHookPre DrainEvents() +1362692527.009512 MetaHookPre QueueEvent(file_new([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/plain]], info=, u2_events=])) +1362692527.009512 MetaHookPre QueueEvent(file_over_new_connection([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) +1362692527.009512 MetaHookPre QueueEvent(get_file_handle(Analyzer::ANALYZER_HTTP, [id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) 1362692527.009512 MetaHookPre QueueEvent(http_begin_entity([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), 
request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) 1362692527.009512 MetaHookPre QueueEvent(http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, ACCEPT-RANGES, bytes)) 1362692527.009512 MetaHookPre QueueEvent(http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, CONNECTION, Keep-Alive)) @@ -1577,10 +1621,20 @@ 1362692527.009512 MetaHookPre QueueEvent(http_reply([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, 
current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], 1.1, 200, OK)) 1362692527.009512 MetaHookPre UpdateNetworkTime(1362692527.009512) 1362692527.009512 | HookUpdateNetworkTime 1362692527.009512 +1362692527.009512 | HookCallFunction Files::__add_analyzers_for_mime_type(FakNcS1Jfe01uljb3, text/plain, [chunk_event=, stream_event=, extract_filename=, extract_limit=0]) +1362692527.009512 | HookCallFunction Files::add_analyzers_for_mime_type([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/plain) +1362692527.009512 | HookCallFunction Files::set_info([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/plain, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=4705, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=], u2_events=]) +1362692527.009512 | HookCallFunction Files::set_info([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/plain]], info=, u2_events=]) 1362692527.009512 | HookCallFunction HTTP::code_in_range(200, 100, 199) +1362692527.009512 | HookCallFunction HTTP::get_file_handle([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F) 1362692527.009512 | HookCallFunction HTTP::set_state([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, 
info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, F) 1362692527.009512 | HookCallFunction HTTP::set_state([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, F) 1362692527.009512 | HookCallFunction HTTP::set_state([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, F) +1362692527.009512 | HookCallFunction cat(Analyzer::ANALYZER_HTTP, 1362692526.869344, F, 1, 1, 141.142.228.5:59856 > 192.150.187.43:80) +1362692527.009512 | HookCallFunction file_new([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/plain]], info=, u2_events=]) +1362692527.009512 | HookCallFunction file_over_new_connection([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, 
status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F) +1362692527.009512 | HookCallFunction fmt(%s:%d > %s:%d, 141.142.228.5, 59856<...>/tcp) +1362692527.009512 | HookCallFunction get_file_handle(Analyzer::ANALYZER_HTTP, [id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F) 1362692527.009512 | HookCallFunction http_begin_entity([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F) 1362692527.009512 | HookCallFunction http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], 
orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, ACCEPT-RANGES, bytes) 1362692527.009512 | HookCallFunction http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, CONNECTION, Keep-Alive) @@ -1592,7 +1646,13 @@ 1362692527.009512 | HookCallFunction http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/2.4.3 (Fedora)) 1362692527.009512 | HookCallFunction http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain; charset=UTF-8) 1362692527.009512 | HookCallFunction http_reply([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], 1.1, 200, OK) +1362692527.009512 | HookCallFunction id_string([orig_h=141.142.228.5, orig_p=59856<...>/tcp]) +1362692527.009512 | HookCallFunction set_file_handle(Analyzer::ANALYZER_HTTP1362692526.869344F11141.142.228.5:59856 > 192.150.187.43:80) +1362692527.009512 | HookCallFunction split_all(HTTP, <...>/) 1362692527.009512 | HookDrainEvents +1362692527.009512 | HookQueueEvent file_new([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, 
conns={[[orig_h=141.142.228.5, orig_p=59856<...>/plain]], info=, u2_events=]) +1362692527.009512 | HookQueueEvent file_over_new_connection([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F) +1362692527.009512 | HookQueueEvent get_file_handle(Analyzer::ANALYZER_HTTP, [id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F) 1362692527.009512 | HookQueueEvent http_begin_entity([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F) 1362692527.009512 | HookQueueEvent http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, 
info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, ACCEPT-RANGES, bytes) 1362692527.009512 | HookQueueEvent http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, CONNECTION, Keep-Alive) @@ -1604,60 +1664,12 @@ 1362692527.009512 | HookQueueEvent http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/2.4.3 (Fedora)) 1362692527.009512 | HookQueueEvent http_header([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain; charset=UTF-8) 1362692527.009512 | HookQueueEvent http_reply([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=, status_msg=, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=, orig_mime_depth=1, resp_mime_depth=0]}, current_request=1, current_response=0], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], 1.1, 200, OK) -1362692527.009721 MetaHookPost CallFunction(Files::__add_analyzers_for_mime_type, (FakNcS1Jfe01uljb3, text/plain, [chunk_event=, stream_event=, extract_filename=, extract_limit=0])) -> -1362692527.009721 MetaHookPost CallFunction(Files::add_analyzers_for_mime_type, ([id=FakNcS1Jfe01uljb3, 
parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/plain)) -> -1362692527.009721 MetaHookPost CallFunction(Files::set_info, ([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/plain, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=4705, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=], u2_events=])) -> -1362692527.009721 MetaHookPost CallFunction(Files::set_info, ([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/plain]], info=, u2_events=])) -> -1362692527.009721 MetaHookPost CallFunction(HTTP::get_file_handle, ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) -> -1362692527.009721 MetaHookPost CallFunction(cat, (Analyzer::ANALYZER_HTTP, 1362692526.869344, F, 1, 1, 141.142.228.5:59856 > 192.150.187.43:80)) -> -1362692527.009721 MetaHookPost CallFunction(file_new, ([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/plain]], info=, u2_events=])) -> -1362692527.009721 MetaHookPost CallFunction(file_over_new_connection, ([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, 
orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) -> -1362692527.009721 MetaHookPost CallFunction(fmt, (%s:%d > %s:%d, 141.142.228.5, 59856<...>/tcp)) -> -1362692527.009721 MetaHookPost CallFunction(get_file_handle, (Analyzer::ANALYZER_HTTP, [id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) -> -1362692527.009721 MetaHookPost CallFunction(id_string, ([orig_h=141.142.228.5, orig_p=59856<...>/tcp])) -> -1362692527.009721 MetaHookPost CallFunction(set_file_handle, (Analyzer::ANALYZER_HTTP1362692526.869344F11141.142.228.5:59856 > 192.150.187.43:80)) -> -1362692527.009721 MetaHookPost CallFunction(split_all, (HTTP, <...>/)) -> 1362692527.009721 MetaHookPost DrainEvents() -> -1362692527.009721 MetaHookPost QueueEvent(file_new([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/plain]], info=, u2_events=])) -> false -1362692527.009721 MetaHookPost QueueEvent(file_over_new_connection([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], 
orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) -> false -1362692527.009721 MetaHookPost QueueEvent(get_file_handle(Analyzer::ANALYZER_HTTP, [id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) -> false 1362692527.009721 MetaHookPost UpdateNetworkTime(1362692527.009721) -> -1362692527.009721 MetaHookPre CallFunction(Files::__add_analyzers_for_mime_type, (FakNcS1Jfe01uljb3, text/plain, [chunk_event=, stream_event=, extract_filename=, extract_limit=0])) -1362692527.009721 MetaHookPre CallFunction(Files::add_analyzers_for_mime_type, ([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/plain)) -1362692527.009721 MetaHookPre CallFunction(Files::set_info, ([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/plain, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=4705, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=], u2_events=])) -1362692527.009721 MetaHookPre CallFunction(Files::set_info, ([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/plain]], info=, u2_events=])) -1362692527.009721 MetaHookPre CallFunction(HTTP::get_file_handle, ([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, 
info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) -1362692527.009721 MetaHookPre CallFunction(cat, (Analyzer::ANALYZER_HTTP, 1362692526.869344, F, 1, 1, 141.142.228.5:59856 > 192.150.187.43:80)) -1362692527.009721 MetaHookPre CallFunction(file_new, ([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/plain]], info=, u2_events=])) -1362692527.009721 MetaHookPre CallFunction(file_over_new_connection, ([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) -1362692527.009721 MetaHookPre CallFunction(fmt, (%s:%d > %s:%d, 141.142.228.5, 59856<...>/tcp)) -1362692527.009721 MetaHookPre CallFunction(get_file_handle, (Analyzer::ANALYZER_HTTP, [id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, 
ssh=, syslog=], F)) -1362692527.009721 MetaHookPre CallFunction(id_string, ([orig_h=141.142.228.5, orig_p=59856<...>/tcp])) -1362692527.009721 MetaHookPre CallFunction(set_file_handle, (Analyzer::ANALYZER_HTTP1362692526.869344F11141.142.228.5:59856 > 192.150.187.43:80)) -1362692527.009721 MetaHookPre CallFunction(split_all, (HTTP, <...>/)) 1362692527.009721 MetaHookPre DrainEvents() -1362692527.009721 MetaHookPre QueueEvent(file_new([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/plain]], info=, u2_events=])) -1362692527.009721 MetaHookPre QueueEvent(file_over_new_connection([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) -1362692527.009721 MetaHookPre QueueEvent(get_file_handle(Analyzer::ANALYZER_HTTP, [id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) 1362692527.009721 MetaHookPre UpdateNetworkTime(1362692527.009721) 1362692527.009721 | HookUpdateNetworkTime 1362692527.009721 -1362692527.009721 | HookCallFunction 
Files::__add_analyzers_for_mime_type(FakNcS1Jfe01uljb3, text/plain, [chunk_event=, stream_event=, extract_filename=, extract_limit=0]) -1362692527.009721 | HookCallFunction Files::add_analyzers_for_mime_type([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/plain) -1362692527.009721 | HookCallFunction Files::set_info([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/plain, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=4705, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=], u2_events=]) -1362692527.009721 | HookCallFunction Files::set_info([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/plain]], info=, u2_events=]) -1362692527.009721 | HookCallFunction HTTP::get_file_handle([id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F) -1362692527.009721 | HookCallFunction cat(Analyzer::ANALYZER_HTTP, 1362692526.869344, F, 1, 1, 141.142.228.5:59856 > 192.150.187.43:80) -1362692527.009721 | HookCallFunction file_new([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/plain]], info=, u2_events=]) -1362692527.009721 | HookCallFunction file_over_new_connection([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, 
status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F) -1362692527.009721 | HookCallFunction fmt(%s:%d > %s:%d, 141.142.228.5, 59856<...>/tcp) -1362692527.009721 | HookCallFunction get_file_handle(Analyzer::ANALYZER_HTTP, [id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F) -1362692527.009721 | HookCallFunction id_string([orig_h=141.142.228.5, orig_p=59856<...>/tcp]) -1362692527.009721 | HookCallFunction set_file_handle(Analyzer::ANALYZER_HTTP1362692526.869344F11141.142.228.5:59856 > 192.150.187.43:80) -1362692527.009721 | HookCallFunction split_all(HTTP, <...>/) 1362692527.009721 | HookDrainEvents -1362692527.009721 | HookQueueEvent file_new([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/plain]], info=, u2_events=]) -1362692527.009721 | HookQueueEvent file_over_new_connection([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, 
resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F) -1362692527.009721 | HookQueueEvent get_file_handle(Analyzer::ANALYZER_HTTP, [id=[orig_h=141.142.228.5, orig_p=59856<...>/1.14 (darwin12.2.0), request_body_len=0, response_body_len=0, status_code=200, status_msg=OK, info_code=, info_msg=, filename=, tags={}, username=, password=, capture_password=F, proxied=, range_request=F, orig_fuids=, orig_mime_types=, resp_fuids=, resp_mime_types=, current_entity=[filename=], orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F) 1362692527.009765 MetaHookPost DrainEvents() -> 1362692527.009765 MetaHookPost UpdateNetworkTime(1362692527.009765) -> 1362692527.009765 MetaHookPre DrainEvents() @@ -1668,11 +1680,11 @@ 1362692527.009775 MetaHookPost CallFunction(HTTP::code_in_range, (200, 100, 199)) -> 1362692527.009775 MetaHookPost CallFunction(HTTP::get_file_handle, ([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) -> 1362692527.009775 MetaHookPost CallFunction(HTTP::set_state, ([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, F)) -> -1362692527.009775 MetaHookPost CallFunction(Log::__write, (Files::LOG, [ts=1362692527.009721, fuid=FakNcS1Jfe01uljb3, tx_hosts={192.150.187.43}, rx_hosts={141.142.228.5}, conn_uids={CXWv6p3arKYeMETxOg}, source=HTTP, depth=0, analyzers={}, mime_type=text/plain, filename=, duration=53.0 usecs, local_orig=, is_orig=F, seen_bytes=4705, 
total_bytes=4705, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=])) -> +1362692527.009775 MetaHookPost CallFunction(Log::__write, (Files::LOG, [ts=1362692527.009512, fuid=FakNcS1Jfe01uljb3, tx_hosts={192.150.187.43}, rx_hosts={141.142.228.5}, conn_uids={CXWv6p3arKYeMETxOg}, source=HTTP, depth=0, analyzers={}, mime_type=text/plain, filename=, duration=262.0 usecs, local_orig=, is_orig=F, seen_bytes=4705, total_bytes=4705, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=])) -> 1362692527.009775 MetaHookPost CallFunction(Log::__write, (HTTP::LOG, [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1])) -> -1362692527.009775 MetaHookPost CallFunction(Log::default_path_func, (Files::LOG, , [ts=1362692527.009721, fuid=FakNcS1Jfe01uljb3, tx_hosts={192.150.187.43}, rx_hosts={141.142.228.5}, conn_uids={CXWv6p3arKYeMETxOg}, source=HTTP, depth=0, analyzers={}, mime_type=text/plain, filename=, duration=53.0 usecs, local_orig=, is_orig=F, seen_bytes=4705, total_bytes=4705, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=])) -> +1362692527.009775 MetaHookPost CallFunction(Log::default_path_func, (Files::LOG, , [ts=1362692527.009512, fuid=FakNcS1Jfe01uljb3, tx_hosts={192.150.187.43}, rx_hosts={141.142.228.5}, conn_uids={CXWv6p3arKYeMETxOg}, source=HTTP, depth=0, analyzers={}, mime_type=text/plain, filename=, duration=262.0 usecs, local_orig=, is_orig=F, seen_bytes=4705, total_bytes=4705, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=])) -> 1362692527.009775 MetaHookPost CallFunction(Log::default_path_func, (HTTP::LOG, , [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1])) 
-> -1362692527.009775 MetaHookPost CallFunction(Log::write, (Files::LOG, [ts=1362692527.009721, fuid=FakNcS1Jfe01uljb3, tx_hosts={192.150.187.43}, rx_hosts={141.142.228.5}, conn_uids={CXWv6p3arKYeMETxOg}, source=HTTP, depth=0, analyzers={}, mime_type=text/plain, filename=, duration=53.0 usecs, local_orig=, is_orig=F, seen_bytes=4705, total_bytes=4705, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=])) -> +1362692527.009775 MetaHookPost CallFunction(Log::write, (Files::LOG, [ts=1362692527.009512, fuid=FakNcS1Jfe01uljb3, tx_hosts={192.150.187.43}, rx_hosts={141.142.228.5}, conn_uids={CXWv6p3arKYeMETxOg}, source=HTTP, depth=0, analyzers={}, mime_type=text/plain, filename=, duration=262.0 usecs, local_orig=, is_orig=F, seen_bytes=4705, total_bytes=4705, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=])) -> 1362692527.009775 MetaHookPost CallFunction(Log::write, (HTTP::LOG, [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1])) -> 1362692527.009775 MetaHookPost CallFunction(cat, (Analyzer::ANALYZER_HTTP, 1362692526.869344, F, 1, 1, 141.142.228.5:59856 > 192.150.187.43:80)) -> 1362692527.009775 MetaHookPost CallFunction(file_state_remove, ([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/plain, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=4705, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=], u2_events=])) -> @@ -1700,11 +1712,11 @@ 1362692527.009775 MetaHookPre CallFunction(HTTP::code_in_range, (200, 100, 199)) 1362692527.009775 MetaHookPre CallFunction(HTTP::get_file_handle, ([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], 
irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F)) 1362692527.009775 MetaHookPre CallFunction(HTTP::set_state, ([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, F)) -1362692527.009775 MetaHookPre CallFunction(Log::__write, (Files::LOG, [ts=1362692527.009721, fuid=FakNcS1Jfe01uljb3, tx_hosts={192.150.187.43}, rx_hosts={141.142.228.5}, conn_uids={CXWv6p3arKYeMETxOg}, source=HTTP, depth=0, analyzers={}, mime_type=text/plain, filename=, duration=53.0 usecs, local_orig=, is_orig=F, seen_bytes=4705, total_bytes=4705, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=])) +1362692527.009775 MetaHookPre CallFunction(Log::__write, (Files::LOG, [ts=1362692527.009512, fuid=FakNcS1Jfe01uljb3, tx_hosts={192.150.187.43}, rx_hosts={141.142.228.5}, conn_uids={CXWv6p3arKYeMETxOg}, source=HTTP, depth=0, analyzers={}, mime_type=text/plain, filename=, duration=262.0 usecs, local_orig=, is_orig=F, seen_bytes=4705, total_bytes=4705, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=])) 1362692527.009775 MetaHookPre CallFunction(Log::__write, (HTTP::LOG, [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1])) -1362692527.009775 MetaHookPre CallFunction(Log::default_path_func, (Files::LOG, , [ts=1362692527.009721, fuid=FakNcS1Jfe01uljb3, tx_hosts={192.150.187.43}, rx_hosts={141.142.228.5}, conn_uids={CXWv6p3arKYeMETxOg}, source=HTTP, depth=0, analyzers={}, mime_type=text/plain, filename=, duration=53.0 usecs, local_orig=, is_orig=F, seen_bytes=4705, total_bytes=4705, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=])) +1362692527.009775 
MetaHookPre CallFunction(Log::default_path_func, (Files::LOG, , [ts=1362692527.009512, fuid=FakNcS1Jfe01uljb3, tx_hosts={192.150.187.43}, rx_hosts={141.142.228.5}, conn_uids={CXWv6p3arKYeMETxOg}, source=HTTP, depth=0, analyzers={}, mime_type=text/plain, filename=, duration=262.0 usecs, local_orig=, is_orig=F, seen_bytes=4705, total_bytes=4705, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=])) 1362692527.009775 MetaHookPre CallFunction(Log::default_path_func, (HTTP::LOG, , [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1])) -1362692527.009775 MetaHookPre CallFunction(Log::write, (Files::LOG, [ts=1362692527.009721, fuid=FakNcS1Jfe01uljb3, tx_hosts={192.150.187.43}, rx_hosts={141.142.228.5}, conn_uids={CXWv6p3arKYeMETxOg}, source=HTTP, depth=0, analyzers={}, mime_type=text/plain, filename=, duration=53.0 usecs, local_orig=, is_orig=F, seen_bytes=4705, total_bytes=4705, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=])) +1362692527.009775 MetaHookPre CallFunction(Log::write, (Files::LOG, [ts=1362692527.009512, fuid=FakNcS1Jfe01uljb3, tx_hosts={192.150.187.43}, rx_hosts={141.142.228.5}, conn_uids={CXWv6p3arKYeMETxOg}, source=HTTP, depth=0, analyzers={}, mime_type=text/plain, filename=, duration=262.0 usecs, local_orig=, is_orig=F, seen_bytes=4705, total_bytes=4705, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=])) 1362692527.009775 MetaHookPre CallFunction(Log::write, (HTTP::LOG, [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1])) 1362692527.009775 MetaHookPre CallFunction(cat, (Analyzer::ANALYZER_HTTP, 1362692526.869344, F, 1, 1, 141.142.228.5:59856 > 192.150.187.43:80)) 1362692527.009775 MetaHookPre 
CallFunction(file_state_remove, ([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/plain, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=4705, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=], u2_events=])) @@ -1733,11 +1745,11 @@ 1362692527.009775 | HookCallFunction HTTP::code_in_range(200, 100, 199) 1362692527.009775 | HookCallFunction HTTP::get_file_handle([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F) 1362692527.009775 | HookCallFunction HTTP::set_state([id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1]}, current_request=1, current_response=1], irc=, modbus=, radius=, snmp=, smtp=, smtp_state=, socks=, ssh=, syslog=], F, F) -1362692527.009775 | HookCallFunction Log::__write(Files::LOG, [ts=1362692527.009721, fuid=FakNcS1Jfe01uljb3, tx_hosts={192.150.187.43}, rx_hosts={141.142.228.5}, conn_uids={CXWv6p3arKYeMETxOg}, source=HTTP, depth=0, analyzers={}, mime_type=text/plain, filename=, duration=53.0 usecs, local_orig=, is_orig=F, seen_bytes=4705, total_bytes=4705, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=]) +1362692527.009775 | HookCallFunction Log::__write(Files::LOG, [ts=1362692527.009512, fuid=FakNcS1Jfe01uljb3, tx_hosts={192.150.187.43}, rx_hosts={141.142.228.5}, conn_uids={CXWv6p3arKYeMETxOg}, source=HTTP, depth=0, analyzers={}, mime_type=text/plain, filename=, duration=262.0 usecs, local_orig=, is_orig=F, seen_bytes=4705, total_bytes=4705, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=]) 1362692527.009775 | HookCallFunction Log::__write(HTTP::LOG, [ts=1362692526.939527, 
uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1]) -1362692527.009775 | HookCallFunction Log::default_path_func(Files::LOG, , [ts=1362692527.009721, fuid=FakNcS1Jfe01uljb3, tx_hosts={192.150.187.43}, rx_hosts={141.142.228.5}, conn_uids={CXWv6p3arKYeMETxOg}, source=HTTP, depth=0, analyzers={}, mime_type=text/plain, filename=, duration=53.0 usecs, local_orig=, is_orig=F, seen_bytes=4705, total_bytes=4705, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=]) +1362692527.009775 | HookCallFunction Log::default_path_func(Files::LOG, , [ts=1362692527.009512, fuid=FakNcS1Jfe01uljb3, tx_hosts={192.150.187.43}, rx_hosts={141.142.228.5}, conn_uids={CXWv6p3arKYeMETxOg}, source=HTTP, depth=0, analyzers={}, mime_type=text/plain, filename=, duration=262.0 usecs, local_orig=, is_orig=F, seen_bytes=4705, total_bytes=4705, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=]) 1362692527.009775 | HookCallFunction Log::default_path_func(HTTP::LOG, , [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1]) -1362692527.009775 | HookCallFunction Log::write(Files::LOG, [ts=1362692527.009721, fuid=FakNcS1Jfe01uljb3, tx_hosts={192.150.187.43}, rx_hosts={141.142.228.5}, conn_uids={CXWv6p3arKYeMETxOg}, source=HTTP, depth=0, analyzers={}, mime_type=text/plain, filename=, duration=53.0 usecs, local_orig=, is_orig=F, seen_bytes=4705, total_bytes=4705, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=]) +1362692527.009775 | HookCallFunction Log::write(Files::LOG, [ts=1362692527.009512, fuid=FakNcS1Jfe01uljb3, tx_hosts={192.150.187.43}, rx_hosts={141.142.228.5}, conn_uids={CXWv6p3arKYeMETxOg}, source=HTTP, depth=0, analyzers={}, mime_type=text/plain, filename=, 
duration=262.0 usecs, local_orig=, is_orig=F, seen_bytes=4705, total_bytes=4705, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=]) 1362692527.009775 | HookCallFunction Log::write(HTTP::LOG, [ts=1362692526.939527, uid=CXWv6p3arKYeMETxOg, id=[orig_h=141.142.228.5, orig_p=59856<...>/plain], current_entity=, orig_mime_depth=1, resp_mime_depth=1]) 1362692527.009775 | HookCallFunction cat(Analyzer::ANALYZER_HTTP, 1362692526.869344, F, 1, 1, 141.142.228.5:59856 > 192.150.187.43:80) 1362692527.009775 | HookCallFunction file_state_remove([id=FakNcS1Jfe01uljb3, parent_id=, source=HTTP, is_orig=F, conns={[[orig_h=141.142.228.5, orig_p=59856<...>/plain, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=4705, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=], u2_events=]) diff --git a/testing/btest/Baseline/plugins.pktdumper/output b/testing/btest/Baseline/plugins.pktdumper/output new file mode 100644 index 0000000000..42b51e8051 --- /dev/null +++ b/testing/btest/Baseline/plugins.pktdumper/output @@ -0,0 +1,12 @@ +Demo::Foo - A Foo packet dumper (dynamic, version 1.0) + [Packet Dumper] FooPktDumper (dumper prefix: "foo") + +=== +Dumping to XXX: 1373858797.646968 len 94 +Dumping to XXX: 1373858797.646998 len 94 +Dumping to XXX: 1373858797.647041 len 86 +Dumping to XXX: 1373858797.647147 len 98 +Dumping to XXX: 1373858797.647186 len 86 +Dumping to XXX: 1373858797.647250 len 86 +Dumping to XXX: 1373858797.647317 len 86 +Dumping to XXX: 1373858797.647350 len 86 diff --git a/testing/btest/Baseline/plugins.pktsrc/conn.log b/testing/btest/Baseline/plugins.pktsrc/conn.log new file mode 100644 index 0000000000..ab218f18fd --- /dev/null +++ b/testing/btest/Baseline/plugins.pktsrc/conn.log @@ -0,0 +1,10 @@ +#separator \x09 +#set_separator , +#empty_field (empty) +#unset_field - +#path conn +#open 2014-09-04-18-06-05 +#fields ts uid id.orig_h id.orig_p 
id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes tunnel_parents +#types time string addr port addr port enum string interval count count string bool count string count count count count set[string] +1409193037.000000 CXWv6p3arKYeMETxOg 1.2.0.2 2527 1.2.0.3 6649 tcp - - - - S0 - 0 S 1 64 0 0 (empty) +#close 2014-09-04-18-06-05 diff --git a/testing/btest/Baseline/plugins.reader/out b/testing/btest/Baseline/plugins.reader/out new file mode 100644 index 0000000000..9dd3101a8d --- /dev/null +++ b/testing/btest/Baseline/plugins.reader/out @@ -0,0 +1,10 @@ +Input::EVENT_NEW +^)kHV32-J_ +Input::EVENT_NEW +(s[Q8J4Pu4 +Input::EVENT_NEW ++3iDbOB}kq +Input::EVENT_NEW +tz9dFehHz) +Input::EVENT_NEW +d&@3g)NljG diff --git a/testing/btest/Baseline/plugins.reader/output b/testing/btest/Baseline/plugins.reader/output new file mode 100644 index 0000000000..fa218d04a5 --- /dev/null +++ b/testing/btest/Baseline/plugins.reader/output @@ -0,0 +1,4 @@ +Demo::Foo - A Foo test input reader (dynamic, version 1.0) + [Writer] Foo (Input::READER_FOO) + +=== diff --git a/testing/btest/Baseline/plugins.writer/output b/testing/btest/Baseline/plugins.writer/output new file mode 100644 index 0000000000..0882718f03 --- /dev/null +++ b/testing/btest/Baseline/plugins.writer/output @@ -0,0 +1,22 @@ +Demo::Foo - A Foo test logging writer (dynamic, version 1.0) + [Writer] Foo (Log::WRITER_FOO) + +=== +[conn] 1340213005.165293|CXWv6p3arKYeMETxOg|10.0.0.55|53994|60.190.189.214|8124|tcp|-|4.314406|0|0|S0|-|0|S|5|320|0|0| +[conn] 1340213010.582723|CjhGID4nQcgTWjvg4c|10.0.0.55|53994|60.190.189.214|8124|tcp|http,socks|13.839419|3860|2934|SF|-|0|ShADadfF|23|5080|20|3986| +[conn] 1340213048.780152|CCvvfg3TEfuqmmG4bh|10.0.0.55|53994|60.190.189.214|8124|tcp|-|-|-|-|SH|-|0|F|1|52|0|0| +[conn] 1340213097.272764|CsRx2w45OKnoww6xl4|10.0.0.55|53994|60.190.189.214|8124|tcp|-|-|-|-|SH|-|0|F|1|52|0|0| +[conn] 
1340213162.160367|CRJuHdVW0XPVINV8a|10.0.0.55|53994|60.190.189.214|8124|tcp|-|-|-|-|SH|-|0|F|1|52|0|0| +[conn] 1340213226.561757|CPbrpk1qSsw6ESzHV4|10.0.0.55|53994|60.190.189.214|8124|tcp|-|-|-|-|SH|-|0|F|1|52|0|0| +[conn] 1340213290.981995|C6pKV8GSxOnSLghOa|10.0.0.55|53994|60.190.189.214|8124|tcp|-|-|-|-|SH|-|0|F|1|52|0|0| +[files] 1340213020.732547|FBtZ7y1ppK8iIeY622|60.190.189.214|10.0.0.55|CjhGID4nQcgTWjvg4c|HTTP|0||image/gif|-|0.000034|-|F|1368|1368|0|0|F|-|-|-|-|- +[http] 1340213019.013158|CjhGID4nQcgTWjvg4c|10.0.0.55|53994|60.190.189.214|8124|1|GET|www.osnews.com|/images/printer2.gif|http://www.osnews.com/|Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:10.0.2) Gecko/20100101 Firefox/10.0.2|0|0|304|Not Modified|-|-|-||-|-|-|-|-|-|- +[http] 1340213019.013426|CjhGID4nQcgTWjvg4c|10.0.0.55|53994|60.190.189.214|8124|2|GET|www.osnews.com|/img2/shorturl.jpg|http://www.osnews.com/|Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:10.0.2) Gecko/20100101 Firefox/10.0.2|0|0|304|Not Modified|-|-|-||-|-|-|-|-|-|- +[http] 1340213019.580162|CjhGID4nQcgTWjvg4c|10.0.0.55|53994|60.190.189.214|8124|3|GET|www.osnews.com|/images/icons/9.gif|http://www.osnews.com/|Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:10.0.2) Gecko/20100101 Firefox/10.0.2|0|0|304|Not Modified|-|-|-||-|-|-|-|-|-|- +[http] 1340213020.155861|CjhGID4nQcgTWjvg4c|10.0.0.55|53994|60.190.189.214|8124|4|GET|www.osnews.com|/images/icons/26.gif|http://www.osnews.com/|Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:10.0.2) Gecko/20100101 Firefox/10.0.2|0|1368|200|OK|-|-|-||-|-|-|-|-|FBtZ7y1ppK8iIeY622|image/gif +[http] 1340213020.732963|CjhGID4nQcgTWjvg4c|10.0.0.55|53994|60.190.189.214|8124|5|GET|www.osnews.com|/images/icons/17.gif|http://www.osnews.com/|Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:10.0.2) Gecko/20100101 Firefox/10.0.2|0|0|304|Not Modified|-|-|-||-|-|-|-|-|-|- +[http] 
1340213021.300269|CjhGID4nQcgTWjvg4c|10.0.0.55|53994|60.190.189.214|8124|6|GET|www.osnews.com|/images/left.gif|http://www.osnews.com/|Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:10.0.2) Gecko/20100101 Firefox/10.0.2|0|0|304|Not Modified|-|-|-||-|-|-|-|-|-|- +[http] 1340213021.861584|CjhGID4nQcgTWjvg4c|10.0.0.55|53994|60.190.189.214|8124|7|GET|www.osnews.com|/images/icons/32.gif|http://www.osnews.com/|Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:10.0.2) Gecko/20100101 Firefox/10.0.2|0|0|304|Not Modified|-|-|-||-|-|-|-|-|-|- +[packet_filter] 1412721099.419280|bro|ip or not ip|T|T +[socks] 1340213015.276495|CjhGID4nQcgTWjvg4c|10.0.0.55|53994|60.190.189.214|8124|5|-|succeeded|-|www.osnews.com|80|192.168.0.31|-|2688 +[tunnel] 1340213015.276495|-|10.0.0.55|0|60.190.189.214|8124|Tunnel::SOCKS|Tunnel::DISCOVER diff --git a/testing/btest/Baseline/scripts.base.frameworks.file-analysis.actions.data_event/out b/testing/btest/Baseline/scripts.base.frameworks.file-analysis.actions.data_event/out index cbd60840bf..d1cc77944c 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.file-analysis.actions.data_event/out +++ b/testing/btest/Baseline/scripts.base.frameworks.file-analysis.actions.data_event/out @@ -5,18 +5,14 @@ FILE_BOF_BUFFER MIME_TYPE text/plain FILE_OVER_NEW_CONNECTION -file_stream, file #0, 1500, ^J0.26 | 2012-08-24 15:10:04 -0700^J^J * Fixing update-changes, which could pick the wrong control file. (Robin Sommer)^J^J * Fixing GPG signing script. (Robin Sommer)^J^J0.25 | 2012-08-01 13:55:46 -0500^J^J * Fix configure script to exit with non-zero status on error (Jon Siwek)^J^J0.24 | 2012-07-05 12:50:43 -0700^J^J * Raise minimum required CMake version to 2.6.3 (Jon Siwek)^J^J * Adding script to delete old fully-merged branches. (Robin Sommer)^J^J0.23-2 | 2012-01-25 13:24:01 -0800^J^J * Fix a bro-cut error message. 
(Daniel Thayer)^J^J0.23 | 2012-01-11 12:16:11 -0800^J^J * Tweaks to release scripts, plus a new one for signing files.^J (Robin Sommer)^J^J0.22 | 2012-01-10 16:45:19 -0800^J^J * Tweaks for OpenBSD support. (Jon Siwek)^J^J * bro-cut extensions and fixes. (Robin Sommer)^J ^J - If no field names are given on the command line, we now pass through^J all fields. Adresses #657.^J^J - Removing some GNUism from awk script. Addresses #653.^J^J - Added option for time output in UTC. Addresses #668.^J^J - Added output field separator option -F. Addresses #649.^J^J - Fixing option -c: only some header lines were passed through^J rather than all. (Robin Sommer)^J^J * Fix parallel make portability. (Jon Siwek)^J^J0.21-9 | 2011-11-07 05:44:14 -0800^J^J * Fixing compiler warnings. Addresses #388. (Jon Siwek)^J^J0.21-2 | 2011-11-02 18:12:13 -0700^J^J * Fix for misnaming temp file in update-changes script. (Robin Sommer)^J^J0.21-1 | 2011-11-02 18:10:39 -0700^J^J * Little fix for make-relea -file_chunk, file #0, 1500, 0, ^J0.26 | 2012-08-24 15:10:04 -0700^J^J * Fixing update-changes, which could pick the wrong control file. (Robin Sommer)^J^J * Fixing GPG signing script. (Robin Sommer)^J^J0.25 | 2012-08-01 13:55:46 -0500^J^J * Fix configure script to exit with non-zero status on error (Jon Siwek)^J^J0.24 | 2012-07-05 12:50:43 -0700^J^J * Raise minimum required CMake version to 2.6.3 (Jon Siwek)^J^J * Adding script to delete old fully-merged branches. (Robin Sommer)^J^J0.23-2 | 2012-01-25 13:24:01 -0800^J^J * Fix a bro-cut error message. (Daniel Thayer)^J^J0.23 | 2012-01-11 12:16:11 -0800^J^J * Tweaks to release scripts, plus a new one for signing files.^J (Robin Sommer)^J^J0.22 | 2012-01-10 16:45:19 -0800^J^J * Tweaks for OpenBSD support. (Jon Siwek)^J^J * bro-cut extensions and fixes. (Robin Sommer)^J ^J - If no field names are given on the command line, we now pass through^J all fields. Adresses #657.^J^J - Removing some GNUism from awk script. 
Addresses #653.^J^J - Added option for time output in UTC. Addresses #668.^J^J - Added output field separator option -F. Addresses #649.^J^J - Fixing option -c: only some header lines were passed through^J rather than all. (Robin Sommer)^J^J * Fix parallel make portability. (Jon Siwek)^J^J0.21-9 | 2011-11-07 05:44:14 -0800^J^J * Fixing compiler warnings. Addresses #388. (Jon Siwek)^J^J0.21-2 | 2011-11-02 18:12:13 -0700^J^J * Fix for misnaming temp file in update-changes script. (Robin Sommer)^J^J0.21-1 | 2011-11-02 18:10:39 -0700^J^J * Little fix for make-relea -file_stream, file #0, 1024, se script, which could pick out the wrong^J tag. (Robin Sommer)^J^J0.21 | 2011-10-27 17:40:45 -0700^J^J * Fixing bro-cut's usage message and argument error handling. (Robin Sommer)^J^J * Bugfix in update-changes script. (Robin Sommer)^J^J * update-changes now ignores commits it did itself. (Robin Sommer)^J^J * Fix a bug in the update-changes script. (Robin Sommer)^J^J * bro-cut now always installs to $prefix/bin by `make install`. (Jon Siwek)^J^J * Options to adjust time format for bro-cut. (Robin Sommer)^J^J The default with -d is now ISO format. The new option "-D "^J specifies a custom strftime()-style format string. Alternatively,^J the environment variable BRO_CUT_TIMEFMT can set the format as^J well.^J^J * bro-cut now understands the field separator header. (Robin Sommer)^J^J * Renaming options -h/-H -> -c/-C, and doing some general cleanup.^J^J0.2 | 2011-10-25 19:53:57 -0700^J^J * Adding support for replacing version string in a setup.py. (Robin^J Sommer)^J^J * Change generated root cert DN indices f -file_chunk, file #0, 1024, 1500, se script, which could pick out the wrong^J tag. (Robin Sommer)^J^J0.21 | 2011-10-27 17:40:45 -0700^J^J * Fixing bro-cut's usage message and argument error handling. (Robin Sommer)^J^J * Bugfix in update-changes script. (Robin Sommer)^J^J * update-changes now ignores commits it did itself. 
(Robin Sommer)^J^J * Fix a bug in the update-changes script. (Robin Sommer)^J^J * bro-cut now always installs to $prefix/bin by `make install`. (Jon Siwek)^J^J * Options to adjust time format for bro-cut. (Robin Sommer)^J^J The default with -d is now ISO format. The new option "-D "^J specifies a custom strftime()-style format string. Alternatively,^J the environment variable BRO_CUT_TIMEFMT can set the format as^J well.^J^J * bro-cut now understands the field separator header. (Robin Sommer)^J^J * Renaming options -h/-H -> -c/-C, and doing some general cleanup.^J^J0.2 | 2011-10-25 19:53:57 -0700^J^J * Adding support for replacing version string in a setup.py. (Robin^J Sommer)^J^J * Change generated root cert DN indices f -file_stream, file #0, 476, ormat for RFC2253^J compliance. (Jon Siwek)^J^J * New tool devel-tools/check-release to run before making releases.^J (Robin Sommer)^J^J * devel-tools/update-changes gets a new option -a to amend to^J previous commit if possible. Default is now not to (used to be the^J opposite). (Robin Sommer)^J^J * Change Mozilla trust root generation to index certs by subject DN. (Jon Siwek)^J^J * Change distclean to only remove build dir. (Jon Siwek)^J^J * Make dist now cleans the -file_chunk, file #0, 476, 2524, ormat for RFC2253^J compliance. (Jon Siwek)^J^J * New tool devel-tools/check-release to run before making releases.^J (Robin Sommer)^J^J * devel-tools/update-changes gets a new option -a to amend to^J previous commit if possible. Default is now not to (used to be the^J opposite). (Robin Sommer)^J^J * Change Mozilla trust root generation to index certs by subject DN. (Jon Siwek)^J^J * Change distclean to only remove build dir. (Jon Siwek)^J^J * Make dist now cleans the -file_stream, file #0, 1024, copied source (Jon Siwek)^J^J * Small tweak to make-release for forced git-clean. 
(Jon Siwek)^J^J * Fix to not let updates scripts loose their executable permissions.^J (Robin Sommer)^J^J * devel-tools/update-changes now looks for a 'release' tag to^J idenfify the stable version, and 'beta' for the beta versions.^J (Robin Sommer).^J^J * Distribution cleanup. (Robin Sommer)^J^J * New script devel-tools/make-release to create source tar balls.^J (Robin Sommer)^J^J * Removing bdcat. With the new log format, this isn't very useful^J anymore. (Robin Sommer)^J^J * Adding script that shows all pending git fastpath commits. (Robin^J Sommer)^J^J * Script to measure CPU time by loading an increasing set of^J scripts. (Robin Sommer)^J^J * extract-conn script now deals wit *.gz files. (Robin Sommer)^J^J * Tiny update to output a valid CA list file for SSL cert^J validation. (Seth Hall)^J^J * Adding "install-aux" target. Addresses #622. (Jon Siwek)^J^J * Distribution cleanup. (Jon Siwek and Robin Sommer)^J^J * FindPCAP -file_chunk, file #0, 1024, 3000, copied source (Jon Siwek)^J^J * Small tweak to make-release for forced git-clean. (Jon Siwek)^J^J * Fix to not let updates scripts loose their executable permissions.^J (Robin Sommer)^J^J * devel-tools/update-changes now looks for a 'release' tag to^J idenfify the stable version, and 'beta' for the beta versions.^J (Robin Sommer).^J^J * Distribution cleanup. (Robin Sommer)^J^J * New script devel-tools/make-release to create source tar balls.^J (Robin Sommer)^J^J * Removing bdcat. With the new log format, this isn't very useful^J anymore. (Robin Sommer)^J^J * Adding script that shows all pending git fastpath commits. (Robin^J Sommer)^J^J * Script to measure CPU time by loading an increasing set of^J scripts. (Robin Sommer)^J^J * extract-conn script now deals wit *.gz files. (Robin Sommer)^J^J * Tiny update to output a valid CA list file for SSL cert^J validation. (Seth Hall)^J^J * Adding "install-aux" target. Addresses #622. (Jon Siwek)^J^J * Distribution cleanup. 
(Jon Siwek and Robin Sommer)^J^J * FindPCAP -file_stream, file #0, 476, now links against thread library when necessary (e.g.^J PF_RING's libpcap) (Jon Siwek)^J^J * Install binaries with an RPATH (Jon Siwek)^J^J * Workaround for FreeBSD CMake port missing debug flags (Jon Siwek)^J^J * Rewrite of the update-changes script. (Robin Sommer)^J^J0.1-1 | 2011-06-14 21:12:41 -0700^J^J * Add a script for generating Mozilla's CA list for the SSL analyzer.^J (Seth Hall)^J^J0.1 | 2011-04-01 16:28:22 -0700^J^J * Converting build process to CMake. (Jon Siwek)^J -file_chunk, file #0, 476, 4024, now links against thread library when necessary (e.g.^J PF_RING's libpcap) (Jon Siwek)^J^J * Install binaries with an RPATH (Jon Siwek)^J^J * Workaround for FreeBSD CMake port missing debug flags (Jon Siwek)^J^J * Rewrite of the update-changes script. (Robin Sommer)^J^J0.1-1 | 2011-06-14 21:12:41 -0700^J^J * Add a script for generating Mozilla's CA list for the SSL analyzer.^J (Seth Hall)^J^J0.1 | 2011-04-01 16:28:22 -0700^J^J * Converting build process to CMake. (Jon Siwek)^J -file_stream, file #0, 205, ^J * Removing cf/hf/ca-* from distribution. The README has a note where^J to find them now. (Robin Sommer)^J^J * General cleanup. (Robin Sommer)^J^J * Initial import of bro/aux from SVN r7088. (Jon Siwek)^J -file_chunk, file #0, 205, 4500, ^J * Removing cf/hf/ca-* from distribution. The README has a note where^J to find them now. (Robin Sommer)^J^J * General cleanup. (Robin Sommer)^J^J * Initial import of bro/aux from SVN r7088. (Jon Siwek)^J +file_stream, file #0, 1146, ^J0.26 | 2012-08-24 15:10:04 -0700^J^J * Fixing update-changes, which could pick the wrong control file. (Robin Sommer)^J^J * Fixing GPG signing script. 
(Robin Sommer)^J^J0.25 | 2012-08-01 13:55:46 -0500^J^J * Fix configure script to exit with non-zero status on error (Jon Siwek)^J^J0.24 | 2012-07-05 12:50:43 -0700^J^J * Raise minimum required CMake version to 2.6.3 (Jon Siwek)^J^J * Adding script to delete old fully-merged branches. (Robin Sommer)^J^J0.23-2 | 2012-01-25 13:24:01 -0800^J^J * Fix a bro-cut error message. (Daniel Thayer)^J^J0.23 | 2012-01-11 12:16:11 -0800^J^J * Tweaks to release scripts, plus a new one for signing files.^J (Robin Sommer)^J^J0.22 | 2012-01-10 16:45:19 -0800^J^J * Tweaks for OpenBSD support. (Jon Siwek)^J^J * bro-cut extensions and fixes. (Robin Sommer)^J ^J - If no field names are given on the command line, we now pass through^J all fields. Adresses #657.^J^J - Removing some GNUism from awk script. Addresses #653.^J^J - Added option for time output in UTC. Addresses #668.^J^J - Added output field separator option -F. Addresses #649.^J^J - Fixing option -c: only some header lines were passed through^J +file_chunk, file #0, 1146, 0, ^J0.26 | 2012-08-24 15:10:04 -0700^J^J * Fixing update-changes, which could pick the wrong control file. (Robin Sommer)^J^J * Fixing GPG signing script. (Robin Sommer)^J^J0.25 | 2012-08-01 13:55:46 -0500^J^J * Fix configure script to exit with non-zero status on error (Jon Siwek)^J^J0.24 | 2012-07-05 12:50:43 -0700^J^J * Raise minimum required CMake version to 2.6.3 (Jon Siwek)^J^J * Adding script to delete old fully-merged branches. (Robin Sommer)^J^J0.23-2 | 2012-01-25 13:24:01 -0800^J^J * Fix a bro-cut error message. (Daniel Thayer)^J^J0.23 | 2012-01-11 12:16:11 -0800^J^J * Tweaks to release scripts, plus a new one for signing files.^J (Robin Sommer)^J^J0.22 | 2012-01-10 16:45:19 -0800^J^J * Tweaks for OpenBSD support. (Jon Siwek)^J^J * bro-cut extensions and fixes. (Robin Sommer)^J ^J - If no field names are given on the command line, we now pass through^J all fields. Adresses #657.^J^J - Removing some GNUism from awk script. 
Addresses #653.^J^J - Added option for time output in UTC. Addresses #668.^J^J - Added output field separator option -F. Addresses #649.^J^J - Fixing option -c: only some header lines were passed through^J +file_stream, file #0, 1448, rather than all. (Robin Sommer)^J^J * Fix parallel make portability. (Jon Siwek)^J^J0.21-9 | 2011-11-07 05:44:14 -0800^J^J * Fixing compiler warnings. Addresses #388. (Jon Siwek)^J^J0.21-2 | 2011-11-02 18:12:13 -0700^J^J * Fix for misnaming temp file in update-changes script. (Robin Sommer)^J^J0.21-1 | 2011-11-02 18:10:39 -0700^J^J * Little fix for make-release script, which could pick out the wrong^J tag. (Robin Sommer)^J^J0.21 | 2011-10-27 17:40:45 -0700^J^J * Fixing bro-cut's usage message and argument error handling. (Robin Sommer)^J^J * Bugfix in update-changes script. (Robin Sommer)^J^J * update-changes now ignores commits it did itself. (Robin Sommer)^J^J * Fix a bug in the update-changes script. (Robin Sommer)^J^J * bro-cut now always installs to $prefix/bin by `make install`. (Jon Siwek)^J^J * Options to adjust time format for bro-cut. (Robin Sommer)^J^J The default with -d is now ISO format. The new option "-D "^J specifies a custom strftime()-style format string. Alternatively,^J the environment variable BRO_CUT_TIMEFMT can set the format as^J well.^J^J * bro-cut now understands the field separator header. (Robin Sommer)^J^J * Renaming options -h/-H -> -c/-C, and doing some general cleanup.^J^J0.2 | 2011-10-25 19:53:57 -0700^J^J * Adding support for replacing version string in a setup.py. (Robin^J Sommer)^J^J * Change generated root cert DN indices format for RFC2253^J compliance. (Jon Siwek)^J^J * New tool devel-tool +file_chunk, file #0, 1448, 1146, rather than all. (Robin Sommer)^J^J * Fix parallel make portability. (Jon Siwek)^J^J0.21-9 | 2011-11-07 05:44:14 -0800^J^J * Fixing compiler warnings. Addresses #388. (Jon Siwek)^J^J0.21-2 | 2011-11-02 18:12:13 -0700^J^J * Fix for misnaming temp file in update-changes script. 
(Robin Sommer)^J^J0.21-1 | 2011-11-02 18:10:39 -0700^J^J * Little fix for make-release script, which could pick out the wrong^J tag. (Robin Sommer)^J^J0.21 | 2011-10-27 17:40:45 -0700^J^J * Fixing bro-cut's usage message and argument error handling. (Robin Sommer)^J^J * Bugfix in update-changes script. (Robin Sommer)^J^J * update-changes now ignores commits it did itself. (Robin Sommer)^J^J * Fix a bug in the update-changes script. (Robin Sommer)^J^J * bro-cut now always installs to $prefix/bin by `make install`. (Jon Siwek)^J^J * Options to adjust time format for bro-cut. (Robin Sommer)^J^J The default with -d is now ISO format. The new option "-D "^J specifies a custom strftime()-style format string. Alternatively,^J the environment variable BRO_CUT_TIMEFMT can set the format as^J well.^J^J * bro-cut now understands the field separator header. (Robin Sommer)^J^J * Renaming options -h/-H -> -c/-C, and doing some general cleanup.^J^J0.2 | 2011-10-25 19:53:57 -0700^J^J * Adding support for replacing version string in a setup.py. (Robin^J Sommer)^J^J * Change generated root cert DN indices format for RFC2253^J compliance. (Jon Siwek)^J^J * New tool devel-tool +file_stream, file #0, 1448, s/check-release to run before making releases.^J (Robin Sommer)^J^J * devel-tools/update-changes gets a new option -a to amend to^J previous commit if possible. Default is now not to (used to be the^J opposite). (Robin Sommer)^J^J * Change Mozilla trust root generation to index certs by subject DN. (Jon Siwek)^J^J * Change distclean to only remove build dir. (Jon Siwek)^J^J * Make dist now cleans the copied source (Jon Siwek)^J^J * Small tweak to make-release for forced git-clean. (Jon Siwek)^J^J * Fix to not let updates scripts loose their executable permissions.^J (Robin Sommer)^J^J * devel-tools/update-changes now looks for a 'release' tag to^J idenfify the stable version, and 'beta' for the beta versions.^J (Robin Sommer).^J^J * Distribution cleanup. 
(Robin Sommer)^J^J * New script devel-tools/make-release to create source tar balls.^J (Robin Sommer)^J^J * Removing bdcat. With the new log format, this isn't very useful^J anymore. (Robin Sommer)^J^J * Adding script that shows all pending git fastpath commits. (Robin^J Sommer)^J^J * Script to measure CPU time by loading an increasing set of^J scripts. (Robin Sommer)^J^J * extract-conn script now deals wit *.gz files. (Robin Sommer)^J^J * Tiny update to output a valid CA list file for SSL cert^J validation. (Seth Hall)^J^J * Adding "install-aux" target. Addresses #622. (Jon Siwek)^J^J * Distribution cleanup. (Jon Siwek and Robin Sommer)^J^J * FindPCAP now links against +file_chunk, file #0, 1448, 2594, s/check-release to run before making releases.^J (Robin Sommer)^J^J * devel-tools/update-changes gets a new option -a to amend to^J previous commit if possible. Default is now not to (used to be the^J opposite). (Robin Sommer)^J^J * Change Mozilla trust root generation to index certs by subject DN. (Jon Siwek)^J^J * Change distclean to only remove build dir. (Jon Siwek)^J^J * Make dist now cleans the copied source (Jon Siwek)^J^J * Small tweak to make-release for forced git-clean. (Jon Siwek)^J^J * Fix to not let updates scripts loose their executable permissions.^J (Robin Sommer)^J^J * devel-tools/update-changes now looks for a 'release' tag to^J idenfify the stable version, and 'beta' for the beta versions.^J (Robin Sommer).^J^J * Distribution cleanup. (Robin Sommer)^J^J * New script devel-tools/make-release to create source tar balls.^J (Robin Sommer)^J^J * Removing bdcat. With the new log format, this isn't very useful^J anymore. (Robin Sommer)^J^J * Adding script that shows all pending git fastpath commits. (Robin^J Sommer)^J^J * Script to measure CPU time by loading an increasing set of^J scripts. (Robin Sommer)^J^J * extract-conn script now deals wit *.gz files. (Robin Sommer)^J^J * Tiny update to output a valid CA list file for SSL cert^J validation. 
(Seth Hall)^J^J * Adding "install-aux" target. Addresses #622. (Jon Siwek)^J^J * Distribution cleanup. (Jon Siwek and Robin Sommer)^J^J * FindPCAP now links against +file_stream, file #0, 663, thread library when necessary (e.g.^J PF_RING's libpcap) (Jon Siwek)^J^J * Install binaries with an RPATH (Jon Siwek)^J^J * Workaround for FreeBSD CMake port missing debug flags (Jon Siwek)^J^J * Rewrite of the update-changes script. (Robin Sommer)^J^J0.1-1 | 2011-06-14 21:12:41 -0700^J^J * Add a script for generating Mozilla's CA list for the SSL analyzer.^J (Seth Hall)^J^J0.1 | 2011-04-01 16:28:22 -0700^J^J * Converting build process to CMake. (Jon Siwek)^J^J * Removing cf/hf/ca-* from distribution. The README has a note where^J to find them now. (Robin Sommer)^J^J * General cleanup. (Robin Sommer)^J^J * Initial import of bro/aux from SVN r7088. (Jon Siwek)^J +file_chunk, file #0, 663, 4042, thread library when necessary (e.g.^J PF_RING's libpcap) (Jon Siwek)^J^J * Install binaries with an RPATH (Jon Siwek)^J^J * Workaround for FreeBSD CMake port missing debug flags (Jon Siwek)^J^J * Rewrite of the update-changes script. (Robin Sommer)^J^J0.1-1 | 2011-06-14 21:12:41 -0700^J^J * Add a script for generating Mozilla's CA list for the SSL analyzer.^J (Seth Hall)^J^J0.1 | 2011-04-01 16:28:22 -0700^J^J * Converting build process to CMake. (Jon Siwek)^J^J * Removing cf/hf/ca-* from distribution. The README has a note where^J to find them now. (Robin Sommer)^J^J * General cleanup. (Robin Sommer)^J^J * Initial import of bro/aux from SVN r7088. 
(Jon Siwek)^J FILE_STATE_REMOVE file #0, 4705, 0 [orig_h=141.142.228.5, orig_p=59856/tcp, resp_h=192.150.187.43, resp_p=80/tcp] diff --git a/testing/btest/Baseline/scripts.base.frameworks.file-analysis.bifs.register_mime_type/files.log b/testing/btest/Baseline/scripts.base.frameworks.file-analysis.bifs.register_mime_type/files.log index 44a90b9ee6..b836d14e47 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.file-analysis.bifs.register_mime_type/files.log +++ b/testing/btest/Baseline/scripts.base.frameworks.file-analysis.bifs.register_mime_type/files.log @@ -3,8 +3,8 @@ #empty_field (empty) #unset_field - #path files -#open 2014-07-21-14-26-07 +#open 2014-09-08-21-50-32 #fields ts fuid tx_hosts rx_hosts conn_uids source depth analyzers mime_type filename duration local_orig is_orig seen_bytes total_bytes missing_bytes overflow_bytes timedout parent_fuid md5 sha1 sha256 extracted #types time string set[addr] set[addr] set[string] string count set[string] string string interval bool bool count count count count bool string string string string string -1362692527.009721 FakNcS1Jfe01uljb3 192.150.187.43 141.142.228.5 CXWv6p3arKYeMETxOg HTTP 0 MD5 text/plain - 0.000054 - F 4705 4705 0 0 F - 397168fd09991a0e712254df7bc639ac - - - -#close 2014-07-21-14-26-07 +1362692527.009512 FakNcS1Jfe01uljb3 192.150.187.43 141.142.228.5 CXWv6p3arKYeMETxOg HTTP 0 MD5 text/plain - 0.000263 - F 4705 4705 0 0 F - 397168fd09991a0e712254df7bc639ac - - - +#close 2014-09-08-21-50-32 diff --git a/testing/btest/Baseline/scripts.base.frameworks.file-analysis.http.multipart/1-file b/testing/btest/Baseline/scripts.base.frameworks.file-analysis.http.multipart/1-file index 77356c3140..30d74d2584 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.file-analysis.http.multipart/1-file +++ b/testing/btest/Baseline/scripts.base.frameworks.file-analysis.http.multipart/1-file @@ -1 +1 @@ -test +test \ No newline at end of file diff --git 
a/testing/btest/Baseline/scripts.base.frameworks.file-analysis.http.multipart/2-file b/testing/btest/Baseline/scripts.base.frameworks.file-analysis.http.multipart/2-file index ac2a9e002d..d606037cb2 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.file-analysis.http.multipart/2-file +++ b/testing/btest/Baseline/scripts.base.frameworks.file-analysis.http.multipart/2-file @@ -1 +1 @@ -test2 +test2 \ No newline at end of file diff --git a/testing/btest/Baseline/scripts.base.frameworks.file-analysis.http.multipart/3-file b/testing/btest/Baseline/scripts.base.frameworks.file-analysis.http.multipart/3-file index ae48ec8c20..29f446afe2 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.file-analysis.http.multipart/3-file +++ b/testing/btest/Baseline/scripts.base.frameworks.file-analysis.http.multipart/3-file @@ -1 +1 @@ -test3 +test3 \ No newline at end of file diff --git a/testing/btest/Baseline/scripts.base.frameworks.file-analysis.http.multipart/out b/testing/btest/Baseline/scripts.base.frameworks.file-analysis.http.multipart/out index b22c8fe886..0bf8d6a0c9 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.file-analysis.http.multipart/out +++ b/testing/btest/Baseline/scripts.base.frameworks.file-analysis.http.multipart/out @@ -1,39 +1,39 @@ FILE_NEW file #0, 0, 0 FILE_BOF_BUFFER -test^M^J +test FILE_OVER_NEW_CONNECTION FILE_STATE_REMOVE -file #0, 6, 0 +file #0, 4, 0 [orig_h=141.142.228.5, orig_p=57262/tcp, resp_h=54.243.88.146, resp_p=80/tcp] source: HTTP -MD5: 9f06243abcb89c70e0c331c61d871fa7 -SHA1: fde773a18bb29f5ed65e6f0a7aa717fd1fa485d4 -SHA256: 837ccb607e312b170fac7383d7ccfd61fa5072793f19a25e75fbacb56539b86b +MD5: 098f6bcd4621d373cade4e832627b4f6 +SHA1: a94a8fe5ccb19ba61c4c0873d391e987982fbbd3 +SHA256: 9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08 FILE_NEW file #1, 0, 0 FILE_BOF_BUFFER -test2^M^J +test2 FILE_OVER_NEW_CONNECTION FILE_STATE_REMOVE -file #1, 7, 0 +file #1, 5, 0 [orig_h=141.142.228.5, orig_p=57262/tcp, 
resp_h=54.243.88.146, resp_p=80/tcp] source: HTTP -MD5: d68af81ef370b3873d50f09140068810 -SHA1: 51a7b6f2d91f6a87822dc04560f2972bc14fc97e -SHA256: de0edd0ac4a705aff70f34734e90a1d0a1d8b76abe4bb53f3ea934bc105b3b17 +MD5: ad0234829205b9033196ba818f7a872b +SHA1: 109f4b3c50d7b0df729d299bc6f8e9ef9066971f +SHA256: 60303ae22b998861bce3b28f33eec1be758a213c86c93c076dbe9f558c11c752 FILE_NEW file #2, 0, 0 FILE_BOF_BUFFER -test3^M^J +test3 FILE_OVER_NEW_CONNECTION FILE_STATE_REMOVE -file #2, 7, 0 +file #2, 5, 0 [orig_h=141.142.228.5, orig_p=57262/tcp, resp_h=54.243.88.146, resp_p=80/tcp] source: HTTP -MD5: 1a3d75d44753ad246f0bd333cdaf08b0 -SHA1: 4f98809ab09272dfcc58266e3f23ae2393f70e76 -SHA256: 018c67a2c30ed9977e1dddfe98cac542165dac355cf9764c91a362613e752933 +MD5: 8ad8757baa8564dc136c1e07507f4a98 +SHA1: 3ebfa301dc59196f18593c45e519287a23297589 +SHA256: fd61a03af4f77d870fc21e05e7e80678095c92d808cfb3b5c279ee04c74aca13 FILE_NEW file #3, 0, 0 FILE_BOF_BUFFER diff --git a/testing/btest/Baseline/scripts.base.frameworks.file-analysis.logging/files.log b/testing/btest/Baseline/scripts.base.frameworks.file-analysis.logging/files.log index cc185a4f1b..daf862e3b9 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.file-analysis.logging/files.log +++ b/testing/btest/Baseline/scripts.base.frameworks.file-analysis.logging/files.log @@ -3,8 +3,8 @@ #empty_field (empty) #unset_field - #path files -#open 2014-04-01-23-13-35 +#open 2014-09-08-21-55-01 #fields ts fuid tx_hosts rx_hosts conn_uids source depth analyzers mime_type filename duration local_orig is_orig seen_bytes total_bytes missing_bytes overflow_bytes timedout parent_fuid md5 sha1 sha256 extracted #types time string set[addr] set[addr] set[string] string count set[string] string string interval bool bool count count count count bool string string string string string -1362692527.009721 FakNcS1Jfe01uljb3 192.150.187.43 141.142.228.5 CXWv6p3arKYeMETxOg HTTP 0 SHA256,DATA_EVENT,MD5,EXTRACT,SHA1 text/plain - 0.000054 - F 4705 4705 
0 0 F - 397168fd09991a0e712254df7bc639ac 1dd7ac0398df6cbc0696445a91ec681facf4dc47 4e7c7ef0984119447e743e3ec77e1de52713e345cde03fe7df753a35849bed18 FakNcS1Jfe01uljb3-file -#close 2014-04-01-23-13-35 +1362692527.009512 FakNcS1Jfe01uljb3 192.150.187.43 141.142.228.5 CXWv6p3arKYeMETxOg HTTP 0 SHA256,DATA_EVENT,MD5,EXTRACT,SHA1 text/plain - 0.000263 - F 4705 4705 0 0 F - 397168fd09991a0e712254df7bc639ac 1dd7ac0398df6cbc0696445a91ec681facf4dc47 4e7c7ef0984119447e743e3ec77e1de52713e345cde03fe7df753a35849bed18 FakNcS1Jfe01uljb3-file +#close 2014-09-08-21-55-01 diff --git a/testing/btest/Baseline/scripts.base.frameworks.file-analysis.smtp/out b/testing/btest/Baseline/scripts.base.frameworks.file-analysis.smtp/out index 1d54e9a2ac..44c240c7ee 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.file-analysis.smtp/out +++ b/testing/btest/Baseline/scripts.base.frameworks.file-analysis.smtp/out @@ -6,12 +6,12 @@ MIME_TYPE text/plain FILE_OVER_NEW_CONNECTION FILE_STATE_REMOVE -file #0, 79, 0 +file #0, 77, 0 [orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp] source: SMTP -MD5: 92bca2e6cdcde73647125da7dccbdd07 -SHA1: b7e497be8a9f5e2c4b6980fceb015360f98f4a13 -SHA256: 785a8a044d1454ec88837108f443bbb30cc4f529393ffd57118261036bfe59f5 +MD5: 58aff3af22807bc5f4b6357c0038256c +SHA1: c39dc8cd0f8d8b1f7fc8b362c41e69fdf20f668a +SHA256: 8d057f3af311c20675eea767a9df5fa31ff3597c6d5d50fd0cdc34766c40204d FILE_NEW file #1, 0, 0 FILE_BOF_BUFFER diff --git a/testing/btest/Baseline/scripts.base.frameworks.file-analysis.smtp/thefile0 b/testing/btest/Baseline/scripts.base.frameworks.file-analysis.smtp/thefile0 index f4dd7d22f4..0b84e1fd86 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.file-analysis.smtp/thefile0 +++ b/testing/btest/Baseline/scripts.base.frameworks.file-analysis.smtp/thefile0 @@ -10,4 +10,3 @@ Find the attachment GPS - diff --git a/testing/btest/Baseline/scripts.base.frameworks.intel.cluster-transparency/manager-1.intel.log 
b/testing/btest/Baseline/scripts.base.frameworks.intel.cluster-transparency/manager-1.intel.log index cd314ab408..ba19f4e8d7 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.intel.cluster-transparency/manager-1.intel.log +++ b/testing/btest/Baseline/scripts.base.frameworks.intel.cluster-transparency/manager-1.intel.log @@ -3,8 +3,8 @@ #empty_field (empty) #unset_field - #path intel -#open 2014-04-01-23-13-48 -#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p fuid file_mime_type file_desc seen.indicator seen.indicator_type seen.where sources -#types time string addr port addr port string string string string enum enum set[string] -1396394028.821227 - - - - - - - - 123.123.123.123 Intel::ADDR Intel::IN_ANYWHERE worker-1 -#close 2014-04-01-23-13-58 +#open 2014-09-23-16-13-39 +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p fuid file_mime_type file_desc seen.indicator seen.indicator_type seen.where seen.node sources +#types time string addr port addr port string string string string enum enum string set[string] +1411488819.555114 - - - - - - - - 123.123.123.123 Intel::ADDR Intel::IN_ANYWHERE worker-2 worker-1 +#close 2014-09-23-16-13-49 diff --git a/testing/btest/Baseline/scripts.base.frameworks.intel.input-and-match/broproc.intel.log b/testing/btest/Baseline/scripts.base.frameworks.intel.input-and-match/broproc.intel.log index c1c81a662b..33c97c0c1e 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.intel.input-and-match/broproc.intel.log +++ b/testing/btest/Baseline/scripts.base.frameworks.intel.input-and-match/broproc.intel.log @@ -3,9 +3,9 @@ #empty_field (empty) #unset_field - #path intel -#open 2014-04-01-23-14-04 -#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p fuid file_mime_type file_desc seen.indicator seen.indicator_type seen.where sources -#types time string addr port addr port string string string string enum enum set[string] -1396394044.377145 - - - - - - - - e@mail.com Intel::EMAIL SOMEWHERE source1 
-1396394044.377145 - - - - - - - - 1.2.3.4 Intel::ADDR SOMEWHERE source1 -#close 2014-04-01-23-14-04 +#open 2014-09-23-16-14-49 +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p fuid file_mime_type file_desc seen.indicator seen.indicator_type seen.where seen.node sources +#types time string addr port addr port string string string string enum enum string set[string] +1411488889.571819 - - - - - - - - e@mail.com Intel::EMAIL SOMEWHERE bro source1 +1411488889.571819 - - - - - - - - 1.2.3.4 Intel::ADDR SOMEWHERE bro source1 +#close 2014-09-23-16-14-49 diff --git a/testing/btest/Baseline/scripts.base.frameworks.intel.read-file-dist-cluster/manager-1.intel.log b/testing/btest/Baseline/scripts.base.frameworks.intel.read-file-dist-cluster/manager-1.intel.log index f7d62eb737..d8e2d43674 100644 --- a/testing/btest/Baseline/scripts.base.frameworks.intel.read-file-dist-cluster/manager-1.intel.log +++ b/testing/btest/Baseline/scripts.base.frameworks.intel.read-file-dist-cluster/manager-1.intel.log @@ -3,11 +3,11 @@ #empty_field (empty) #unset_field - #path intel -#open 2014-04-01-23-14-12 -#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p fuid file_mime_type file_desc seen.indicator seen.indicator_type seen.where sources -#types time string addr port addr port string string string string enum enum set[string] -1396394052.512481 - - - - - - - - 1.2.3.4 Intel::ADDR Intel::IN_A_TEST source1 -1396394052.512481 - - - - - - - - e@mail.com Intel::EMAIL Intel::IN_A_TEST source1 -1396394053.554897 - - - - - - - - 1.2.3.4 Intel::ADDR Intel::IN_A_TEST source1 -1396394053.554897 - - - - - - - - e@mail.com Intel::EMAIL Intel::IN_A_TEST source1 -#close 2014-04-01-23-14-21 +#open 2014-09-23-16-15-00 +#fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p fuid file_mime_type file_desc seen.indicator seen.indicator_type seen.where seen.node sources +#types time string addr port addr port string string string string enum enum string set[string] +1411488900.900403 - - - - - - - - 
1.2.3.4 Intel::ADDR Intel::IN_A_TEST worker-1 source1 +1411488900.900403 - - - - - - - - e@mail.com Intel::EMAIL Intel::IN_A_TEST worker-1 source1 +1411488901.923543 - - - - - - - - 1.2.3.4 Intel::ADDR Intel::IN_A_TEST worker-2 source1 +1411488901.923543 - - - - - - - - e@mail.com Intel::EMAIL Intel::IN_A_TEST worker-2 source1 +#close 2014-09-23-16-15-09 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.options/ssh.ds.xml b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.options/ssh.ds.xml deleted file mode 100644 index a1e65c254e..0000000000 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.options/ssh.ds.xml +++ /dev/null @@ -1,16 +0,0 @@ - - - - - - - - - - - - - - - - diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.rotate/out b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.rotate/out deleted file mode 100644 index 94f25c37f4..0000000000 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.rotate/out +++ /dev/null @@ -1,290 +0,0 @@ -test.2011-03-07-03-00-05.ds test 11-03-07_03.00.05 11-03-07_04.00.05 0 dataseries -test.2011-03-07-04-00-05.ds test 11-03-07_04.00.05 11-03-07_05.00.05 0 dataseries -test.2011-03-07-05-00-05.ds test 11-03-07_05.00.05 11-03-07_06.00.05 0 dataseries -test.2011-03-07-06-00-05.ds test 11-03-07_06.00.05 11-03-07_07.00.05 0 dataseries -test.2011-03-07-07-00-05.ds test 11-03-07_07.00.05 11-03-07_08.00.05 0 dataseries -test.2011-03-07-08-00-05.ds test 11-03-07_08.00.05 11-03-07_09.00.05 0 dataseries -test.2011-03-07-09-00-05.ds test 11-03-07_09.00.05 11-03-07_10.00.05 0 dataseries -test.2011-03-07-10-00-05.ds test 11-03-07_10.00.05 11-03-07_11.00.05 0 dataseries -test.2011-03-07-11-00-05.ds test 11-03-07_11.00.05 11-03-07_12.00.05 0 dataseries -test.2011-03-07-12-00-05.ds test 11-03-07_12.00.05 11-03-07_12.59.55 1 dataseries -> test.2011-03-07-03-00-05.ds -# Extent Types ... 
- - - - - - - - - - - - - - - - - - - - - - -# Extent, type='test' -t id.orig_h id.orig_p id.resp_h id.resp_p -1299466805.000000 10.0.0.1 20 10.0.0.2 1024 -1299470395.000000 10.0.0.2 20 10.0.0.3 0 -> test.2011-03-07-04-00-05.ds -# Extent Types ... - - - - - - - - - - - - - - - - - - - - - - -# Extent, type='test' -t id.orig_h id.orig_p id.resp_h id.resp_p -1299470405.000000 10.0.0.1 20 10.0.0.2 1025 -1299473995.000000 10.0.0.2 20 10.0.0.3 1 -> test.2011-03-07-05-00-05.ds -# Extent Types ... - - - - - - - - - - - - - - - - - - - - - - -# Extent, type='test' -t id.orig_h id.orig_p id.resp_h id.resp_p -1299474005.000000 10.0.0.1 20 10.0.0.2 1026 -1299477595.000000 10.0.0.2 20 10.0.0.3 2 -> test.2011-03-07-06-00-05.ds -# Extent Types ... - - - - - - - - - - - - - - - - - - - - - - -# Extent, type='test' -t id.orig_h id.orig_p id.resp_h id.resp_p -1299477605.000000 10.0.0.1 20 10.0.0.2 1027 -1299481195.000000 10.0.0.2 20 10.0.0.3 3 -> test.2011-03-07-07-00-05.ds -# Extent Types ... - - - - - - - - - - - - - - - - - - - - - - -# Extent, type='test' -t id.orig_h id.orig_p id.resp_h id.resp_p -1299481205.000000 10.0.0.1 20 10.0.0.2 1028 -1299484795.000000 10.0.0.2 20 10.0.0.3 4 -> test.2011-03-07-08-00-05.ds -# Extent Types ... - - - - - - - - - - - - - - - - - - - - - - -# Extent, type='test' -t id.orig_h id.orig_p id.resp_h id.resp_p -1299484805.000000 10.0.0.1 20 10.0.0.2 1029 -1299488395.000000 10.0.0.2 20 10.0.0.3 5 -> test.2011-03-07-09-00-05.ds -# Extent Types ... - - - - - - - - - - - - - - - - - - - - - - -# Extent, type='test' -t id.orig_h id.orig_p id.resp_h id.resp_p -1299488405.000000 10.0.0.1 20 10.0.0.2 1030 -1299491995.000000 10.0.0.2 20 10.0.0.3 6 -> test.2011-03-07-10-00-05.ds -# Extent Types ... - - - - - - - - - - - - - - - - - - - - - - -# Extent, type='test' -t id.orig_h id.orig_p id.resp_h id.resp_p -1299492005.000000 10.0.0.1 20 10.0.0.2 1031 -1299495595.000000 10.0.0.2 20 10.0.0.3 7 -> test.2011-03-07-11-00-05.ds -# Extent Types ... 
- - - - - - - - - - - - - - - - - - - - - - -# Extent, type='test' -t id.orig_h id.orig_p id.resp_h id.resp_p -1299495605.000000 10.0.0.1 20 10.0.0.2 1032 -1299499195.000000 10.0.0.2 20 10.0.0.3 8 -> test.2011-03-07-12-00-05.ds -# Extent Types ... - - - - - - - - - - - - - - - - - - - - - - -# Extent, type='test' -t id.orig_h id.orig_p id.resp_h id.resp_p -1299499205.000000 10.0.0.1 20 10.0.0.2 1033 -1299502795.000000 10.0.0.2 20 10.0.0.3 9 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.test-logging/ssh.ds.txt b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.test-logging/ssh.ds.txt deleted file mode 100644 index 225217faea..0000000000 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.test-logging/ssh.ds.txt +++ /dev/null @@ -1,34 +0,0 @@ -# Extent Types ... - - - - - - - - - - - - - - - - - - - - - - - - - - -# Extent, type='ssh' -t id.orig_h id.orig_p id.resp_h id.resp_p status country -1342748962.493341 1.2.3.4 1234 2.3.4.5 80 success unknown -1342748962.493341 1.2.3.4 1234 2.3.4.5 80 failure US -1342748962.493341 1.2.3.4 1234 2.3.4.5 80 failure UK -1342748962.493341 1.2.3.4 1234 2.3.4.5 80 success BR -1342748962.493341 1.2.3.4 1234 2.3.4.5 80 failure MX diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.time-as-int/conn.ds.txt b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.time-as-int/conn.ds.txt deleted file mode 100644 index a832005c83..0000000000 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.time-as-int/conn.ds.txt +++ /dev/null @@ -1,89 +0,0 @@ -# Extent Types ... 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -# Extent, type='conn' -ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes tunnel_parents -1300475167096535 CXWv6p3arKYeMETxOg 141.142.220.202 5353 224.0.0.251 5353 udp dns 0 0 0 S0 F 0 D 1 73 0 0 -1300475167097012 CjhGID4nQcgTWjvg4c fe80::217:f2ff:fed7:cf65 5353 ff02::fb 5353 udp dns 0 0 0 S0 F 0 D 1 199 0 0 -1300475167099816 CCvvfg3TEfuqmmG4bh 141.142.220.50 5353 224.0.0.251 5353 udp dns 0 0 0 S0 F 0 D 1 179 0 0 -1300475168853899 CPbrpk1qSsw6ESzHV4 141.142.220.118 43927 141.142.2.2 53 udp dns 435 38 89 SF F 0 Dd 1 66 1 117 -1300475168854378 C6pKV8GSxOnSLghOa 141.142.220.118 37676 141.142.2.2 53 udp dns 420 52 99 SF F 0 Dd 1 80 1 127 -1300475168854837 CIPOse170MGiRM1Qf4 141.142.220.118 40526 141.142.2.2 53 udp dns 391 38 183 SF F 0 Dd 1 66 1 211 -1300475168857956 CMXxB5GvmoxJFXdTa 141.142.220.118 32902 141.142.2.2 53 udp dns 317 38 89 SF F 0 Dd 1 66 1 117 -1300475168858306 Caby8b1slFea8xwSmb 141.142.220.118 59816 141.142.2.2 53 udp dns 343 52 99 SF F 0 Dd 1 80 1 127 -1300475168858713 Che1bq3i2rO3KD1Syg 141.142.220.118 59714 141.142.2.2 53 udp dns 375 38 183 SF F 0 Dd 1 66 1 211 -1300475168891644 CEle3f3zno26fFZkrh 141.142.220.118 58206 141.142.2.2 53 udp dns 339 38 89 SF F 0 Dd 1 66 1 117 -1300475168892037 CwSkQu4eWZCH7OONC1 141.142.220.118 38911 141.142.2.2 53 udp dns 334 52 99 SF F 0 Dd 1 80 1 127 -1300475168892414 CfTOmO0HKorjr8Zp7 141.142.220.118 59746 141.142.2.2 53 udp dns 420 38 183 SF F 0 Dd 1 66 1 211 -1300475168893988 Cab0vO1xNYSS2hJkle 141.142.220.118 45000 141.142.2.2 53 udp dns 384 38 89 SF F 0 Dd 1 66 1 117 -1300475168894422 Cx2FqO23omNawSNrxj 141.142.220.118 48479 141.142.2.2 53 udp dns 316 52 99 SF F 0 Dd 1 80 1 127 -1300475168894787 Cx3C534wEyF3OvvcQe 141.142.220.118 48128 141.142.2.2 53 udp dns 422 38 183 SF F 0 Dd 
1 66 1 211 -1300475168901749 CUKS0W3HFYOnBqSE5e 141.142.220.118 56056 141.142.2.2 53 udp dns 402 36 131 SF F 0 Dd 1 64 1 159 -1300475168902195 CRrfvP2lalMAYOCLhj 141.142.220.118 55092 141.142.2.2 53 udp dns 374 36 198 SF F 0 Dd 1 64 1 226 -1300475169899438 CojBOU3CXcLHl1r6x1 141.142.220.44 5353 224.0.0.251 5353 udp dns 0 0 0 S0 F 0 D 1 85 0 0 -1300475170862384 CJzVQRGJrX6V15ik7 141.142.220.226 137 141.142.220.255 137 udp dns 2613016 350 0 S0 F 0 D 7 546 0 0 -1300475171675372 ClAbxY1nmdjCuo0Le2 fe80::3074:17d5:2052:c324 65373 ff02::1:3 5355 udp dns 100096 66 0 S0 F 0 D 2 162 0 0 -1300475171677081 CwG0BF1VXE0gWgs78 141.142.220.226 55131 224.0.0.252 5355 udp dns 100020 66 0 S0 F 0 D 2 122 0 0 -1300475173116749 CisNaL1Cm73CiNOmcg fe80::3074:17d5:2052:c324 54213 ff02::1:3 5355 udp dns 99801 66 0 S0 F 0 D 2 162 0 0 -1300475173117362 CBQnJn22qN8TOeeZil 141.142.220.226 55671 224.0.0.252 5355 udp dns 99848 66 0 S0 F 0 D 2 122 0 0 -1300475173153679 CbEsuD3dgDDngdlbKf 141.142.220.238 56641 141.142.220.255 137 udp dns 0 0 0 S0 F 0 D 1 78 0 0 -1300475168859163 C3SfNE4BWaU4aSuwkc 141.142.220.118 49998 208.80.152.3 80 tcp http 215893 1130 734 S1 F 0 ShADad 6 1450 4 950 -1300475168652003 CsRx2w45OKnoww6xl4 141.142.220.118 35634 208.80.152.2 80 tcp 61328 463 350 OTH F 0 DdA 2 567 1 402 -1300475168895267 CkDsfG2YIeWJmXWNWj 141.142.220.118 50001 208.80.152.3 80 tcp http 227283 1178 734 S1 F 0 ShADad 6 1498 4 950 -1300475168902635 Cn78a440HlxuyZKs6f 141.142.220.118 35642 208.80.152.2 80 tcp http 120040 534 412 S1 F 0 ShADad 4 750 3 576 -1300475168892936 CyAhVIzHqb7t7kv28 141.142.220.118 50000 208.80.152.3 80 tcp http 229603 1148 734 S1 F 0 ShADad 6 1468 4 950 -1300475168855305 C7XEbhP654jzLoe3a 141.142.220.118 49996 208.80.152.3 80 tcp http 218501 1171 733 S1 F 0 ShADad 6 1491 4 949 -1300475168892913 CzA03V1VcgagLjnO92 141.142.220.118 49999 208.80.152.3 80 tcp http 220960 1137 733 S1 F 0 ShADad 6 1457 4 949 -1300475169780331 CUof3F2yAIid8QS3dk 141.142.220.235 6705 173.192.163.128 80 
tcp 0 0 0 OTH F 0 h 0 0 1 48 -1300475168724007 CRJuHdVW0XPVINV8a 141.142.220.118 48649 208.80.152.118 80 tcp http 119904 525 232 S1 F 0 ShADad 4 741 3 396 -1300475168855330 CJ3xTn1c4Zw9TmAE05 141.142.220.118 49997 208.80.152.3 80 tcp http 219720 1125 734 S1 F 0 ShADad 6 1445 4 950 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.wikipedia/conn.ds.txt b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.wikipedia/conn.ds.txt deleted file mode 100644 index afb44e36eb..0000000000 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.wikipedia/conn.ds.txt +++ /dev/null @@ -1,89 +0,0 @@ -# Extent Types ... - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -# Extent, type='conn' -ts uid id.orig_h id.orig_p id.resp_h id.resp_p proto service duration orig_bytes resp_bytes conn_state local_orig missed_bytes history orig_pkts orig_ip_bytes resp_pkts resp_ip_bytes tunnel_parents -1300475167.096535 CXWv6p3arKYeMETxOg 141.142.220.202 5353 224.0.0.251 5353 udp dns 0.000000 0 0 S0 F 0 D 1 73 0 0 -1300475167.097012 CjhGID4nQcgTWjvg4c fe80::217:f2ff:fed7:cf65 5353 ff02::fb 5353 udp dns 0.000000 0 0 S0 F 0 D 1 199 0 0 -1300475167.099816 CCvvfg3TEfuqmmG4bh 141.142.220.50 5353 224.0.0.251 5353 udp dns 0.000000 0 0 S0 F 0 D 1 179 0 0 -1300475168.853899 CPbrpk1qSsw6ESzHV4 141.142.220.118 43927 141.142.2.2 53 udp dns 0.000435 38 89 SF F 0 Dd 1 66 1 117 -1300475168.854378 C6pKV8GSxOnSLghOa 141.142.220.118 37676 141.142.2.2 53 udp dns 0.000420 52 99 SF F 0 Dd 1 80 1 127 -1300475168.854837 CIPOse170MGiRM1Qf4 141.142.220.118 40526 141.142.2.2 53 udp dns 0.000392 38 183 SF F 0 Dd 1 66 1 211 -1300475168.857956 CMXxB5GvmoxJFXdTa 141.142.220.118 32902 141.142.2.2 53 udp dns 0.000317 38 89 SF F 0 Dd 1 66 1 117 -1300475168.858306 Caby8b1slFea8xwSmb 141.142.220.118 59816 141.142.2.2 53 udp dns 0.000343 52 99 SF F 0 Dd 1 80 1 127 -1300475168.858713 Che1bq3i2rO3KD1Syg 141.142.220.118 
59714 141.142.2.2 53 udp dns 0.000375 38 183 SF F 0 Dd 1 66 1 211 -1300475168.891644 CEle3f3zno26fFZkrh 141.142.220.118 58206 141.142.2.2 53 udp dns 0.000339 38 89 SF F 0 Dd 1 66 1 117 -1300475168.892037 CwSkQu4eWZCH7OONC1 141.142.220.118 38911 141.142.2.2 53 udp dns 0.000335 52 99 SF F 0 Dd 1 80 1 127 -1300475168.892414 CfTOmO0HKorjr8Zp7 141.142.220.118 59746 141.142.2.2 53 udp dns 0.000421 38 183 SF F 0 Dd 1 66 1 211 -1300475168.893988 Cab0vO1xNYSS2hJkle 141.142.220.118 45000 141.142.2.2 53 udp dns 0.000384 38 89 SF F 0 Dd 1 66 1 117 -1300475168.894422 Cx2FqO23omNawSNrxj 141.142.220.118 48479 141.142.2.2 53 udp dns 0.000317 52 99 SF F 0 Dd 1 80 1 127 -1300475168.894787 Cx3C534wEyF3OvvcQe 141.142.220.118 48128 141.142.2.2 53 udp dns 0.000423 38 183 SF F 0 Dd 1 66 1 211 -1300475168.901749 CUKS0W3HFYOnBqSE5e 141.142.220.118 56056 141.142.2.2 53 udp dns 0.000402 36 131 SF F 0 Dd 1 64 1 159 -1300475168.902195 CRrfvP2lalMAYOCLhj 141.142.220.118 55092 141.142.2.2 53 udp dns 0.000374 36 198 SF F 0 Dd 1 64 1 226 -1300475169.899438 CojBOU3CXcLHl1r6x1 141.142.220.44 5353 224.0.0.251 5353 udp dns 0.000000 0 0 S0 F 0 D 1 85 0 0 -1300475170.862384 CJzVQRGJrX6V15ik7 141.142.220.226 137 141.142.220.255 137 udp dns 2.613017 350 0 S0 F 0 D 7 546 0 0 -1300475171.675372 ClAbxY1nmdjCuo0Le2 fe80::3074:17d5:2052:c324 65373 ff02::1:3 5355 udp dns 0.100096 66 0 S0 F 0 D 2 162 0 0 -1300475171.677081 CwG0BF1VXE0gWgs78 141.142.220.226 55131 224.0.0.252 5355 udp dns 0.100021 66 0 S0 F 0 D 2 122 0 0 -1300475173.116749 CisNaL1Cm73CiNOmcg fe80::3074:17d5:2052:c324 54213 ff02::1:3 5355 udp dns 0.099801 66 0 S0 F 0 D 2 162 0 0 -1300475173.117362 CBQnJn22qN8TOeeZil 141.142.220.226 55671 224.0.0.252 5355 udp dns 0.099849 66 0 S0 F 0 D 2 122 0 0 -1300475173.153679 CbEsuD3dgDDngdlbKf 141.142.220.238 56641 141.142.220.255 137 udp dns 0.000000 0 0 S0 F 0 D 1 78 0 0 -1300475168.859163 C3SfNE4BWaU4aSuwkc 141.142.220.118 49998 208.80.152.3 80 tcp http 0.215893 1130 734 S1 F 0 ShADad 6 1450 4 950 
-1300475168.652003 CsRx2w45OKnoww6xl4 141.142.220.118 35634 208.80.152.2 80 tcp 0.061329 463 350 OTH F 0 DdA 2 567 1 402 -1300475168.895267 CkDsfG2YIeWJmXWNWj 141.142.220.118 50001 208.80.152.3 80 tcp http 0.227284 1178 734 S1 F 0 ShADad 6 1498 4 950 -1300475168.902635 Cn78a440HlxuyZKs6f 141.142.220.118 35642 208.80.152.2 80 tcp http 0.120041 534 412 S1 F 0 ShADad 4 750 3 576 -1300475168.892936 CyAhVIzHqb7t7kv28 141.142.220.118 50000 208.80.152.3 80 tcp http 0.229603 1148 734 S1 F 0 ShADad 6 1468 4 950 -1300475168.855305 C7XEbhP654jzLoe3a 141.142.220.118 49996 208.80.152.3 80 tcp http 0.218501 1171 733 S1 F 0 ShADad 6 1491 4 949 -1300475168.892913 CzA03V1VcgagLjnO92 141.142.220.118 49999 208.80.152.3 80 tcp http 0.220961 1137 733 S1 F 0 ShADad 6 1457 4 949 -1300475169.780331 CUof3F2yAIid8QS3dk 141.142.220.235 6705 173.192.163.128 80 tcp 0.000000 0 0 OTH F 0 h 0 0 1 48 -1300475168.724007 CRJuHdVW0XPVINV8a 141.142.220.118 48649 208.80.152.118 80 tcp http 0.119905 525 232 S1 F 0 ShADad 4 741 3 396 -1300475168.855330 CJ3xTn1c4Zw9TmAE05 141.142.220.118 49997 208.80.152.3 80 tcp http 0.219720 1125 734 S1 F 0 ShADad 6 1445 4 950 diff --git a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.wikipedia/http.ds.txt b/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.wikipedia/http.ds.txt deleted file mode 100644 index eec7031ba7..0000000000 --- a/testing/btest/Baseline/scripts.base.frameworks.logging.dataseries.wikipedia/http.ds.txt +++ /dev/null @@ -1,83 +0,0 @@ -# Extent Types ... 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -# Extent, type='http' -ts uid id.orig_h id.orig_p id.resp_h id.resp_p trans_depth method host uri referrer user_agent request_body_len response_body_len status_code status_msg info_code info_msg filename tags username password proxied orig_fuids orig_mime_types resp_fuids resp_mime_types -1300475168.784020 CRJuHdVW0XPVINV8a 141.142.220.118 48649 208.80.152.118 80 1 GET bits.wikimedia.org /skins-1.5/monobook/main.css http://www.wikipedia.org/ Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.15) Gecko/20110303 Ubuntu/10.04 (lucid) Firefox/3.6.15 0 0 304 Not Modified 0 -1300475168.916018 CJ3xTn1c4Zw9TmAE05 141.142.220.118 49997 208.80.152.3 80 1 GET upload.wikimedia.org /wikipedia/commons/6/63/Wikipedia-logo.png http://www.wikipedia.org/ Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.15) Gecko/20110303 Ubuntu/10.04 (lucid) Firefox/3.6.15 0 0 304 Not Modified 0 -1300475168.916183 C7XEbhP654jzLoe3a 141.142.220.118 49996 208.80.152.3 80 1 GET upload.wikimedia.org /wikipedia/commons/thumb/b/bb/Wikipedia_wordmark.svg/174px-Wikipedia_wordmark.svg.png http://www.wikipedia.org/ Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.15) Gecko/20110303 Ubuntu/10.04 (lucid) Firefox/3.6.15 0 0 304 Not Modified 0 -1300475168.918358 C3SfNE4BWaU4aSuwkc 141.142.220.118 49998 208.80.152.3 80 1 GET upload.wikimedia.org /wikipedia/commons/b/bd/Bookshelf-40x201_6.png http://www.wikipedia.org/ Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.15) Gecko/20110303 Ubuntu/10.04 (lucid) Firefox/3.6.15 0 0 304 Not Modified 0 -1300475168.952307 CyAhVIzHqb7t7kv28 141.142.220.118 50000 208.80.152.3 80 1 GET upload.wikimedia.org /wikipedia/commons/thumb/8/8a/Wikinews-logo.png/35px-Wikinews-logo.png http://www.wikipedia.org/ Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.15) Gecko/20110303 Ubuntu/10.04 (lucid) Firefox/3.6.15 0 0 304 Not Modified 0 
-1300475168.952296 CzA03V1VcgagLjnO92 141.142.220.118 49999 208.80.152.3 80 1 GET upload.wikimedia.org /wikipedia/commons/4/4a/Wiktionary-logo-en-35px.png http://www.wikipedia.org/ Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.15) Gecko/20110303 Ubuntu/10.04 (lucid) Firefox/3.6.15 0 0 304 Not Modified 0 -1300475168.954820 CkDsfG2YIeWJmXWNWj 141.142.220.118 50001 208.80.152.3 80 1 GET upload.wikimedia.org /wikipedia/commons/thumb/f/fa/Wikiquote-logo.svg/35px-Wikiquote-logo.svg.png http://www.wikipedia.org/ Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.15) Gecko/20110303 Ubuntu/10.04 (lucid) Firefox/3.6.15 0 0 304 Not Modified 0 -1300475168.962687 Cn78a440HlxuyZKs6f 141.142.220.118 35642 208.80.152.2 80 1 GET meta.wikimedia.org /images/wikimedia-button.png http://www.wikipedia.org/ Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.15) Gecko/20110303 Ubuntu/10.04 (lucid) Firefox/3.6.15 0 0 304 Not Modified 0 -1300475168.975934 CJ3xTn1c4Zw9TmAE05 141.142.220.118 49997 208.80.152.3 80 2 GET upload.wikimedia.org /wikipedia/commons/thumb/f/fa/Wikibooks-logo.svg/35px-Wikibooks-logo.svg.png http://www.wikipedia.org/ Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.15) Gecko/20110303 Ubuntu/10.04 (lucid) Firefox/3.6.15 0 0 304 Not Modified 0 -1300475168.976436 C7XEbhP654jzLoe3a 141.142.220.118 49996 208.80.152.3 80 2 GET upload.wikimedia.org /wikipedia/commons/thumb/d/df/Wikispecies-logo.svg/35px-Wikispecies-logo.svg.png http://www.wikipedia.org/ Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.15) Gecko/20110303 Ubuntu/10.04 (lucid) Firefox/3.6.15 0 0 304 Not Modified 0 -1300475168.979264 C3SfNE4BWaU4aSuwkc 141.142.220.118 49998 208.80.152.3 80 2 GET upload.wikimedia.org /wikipedia/commons/thumb/4/4c/Wikisource-logo.svg/35px-Wikisource-logo.svg.png http://www.wikipedia.org/ Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.15) Gecko/20110303 Ubuntu/10.04 (lucid) Firefox/3.6.15 0 0 304 Not Modified 0 -1300475169.014619 CyAhVIzHqb7t7kv28 141.142.220.118 50000 
208.80.152.3 80 2 GET upload.wikimedia.org /wikipedia/commons/thumb/4/4a/Commons-logo.svg/35px-Commons-logo.svg.png http://www.wikipedia.org/ Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.15) Gecko/20110303 Ubuntu/10.04 (lucid) Firefox/3.6.15 0 0 304 Not Modified 0 -1300475169.014593 CzA03V1VcgagLjnO92 141.142.220.118 49999 208.80.152.3 80 2 GET upload.wikimedia.org /wikipedia/commons/thumb/9/91/Wikiversity-logo.svg/35px-Wikiversity-logo.svg.png http://www.wikipedia.org/ Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.15) Gecko/20110303 Ubuntu/10.04 (lucid) Firefox/3.6.15 0 0 304 Not Modified 0 -1300475169.014927 CkDsfG2YIeWJmXWNWj 141.142.220.118 50001 208.80.152.3 80 2 GET upload.wikimedia.org /wikipedia/commons/thumb/7/75/Wikimedia_Community_Logo.svg/35px-Wikimedia_Community_Logo.svg.png http://www.wikipedia.org/ Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.15) Gecko/20110303 Ubuntu/10.04 (lucid) Firefox/3.6.15 0 0 304 Not Modified 0 diff --git a/testing/btest/Baseline/scripts.base.protocols.dns.tsig/out b/testing/btest/Baseline/scripts.base.protocols.dns.tsig/out new file mode 100644 index 0000000000..ddeb775ec8 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.protocols.dns.tsig/out @@ -0,0 +1,2 @@ +[query=secret-key, qtype=3, alg_name=hmac-md5.sig-alg.reg.int, sig=F\xbd\xbf1\xef^B6\xb8\xeb\xae1u,\x87\xdb^?, time_signed=21513.794, fudge=300.0, orig_id=9703, rr_error=0, is_query=1] +16 diff --git a/testing/btest/Baseline/scripts.base.protocols.http.content-range-gap/extract_files.thefile b/testing/btest/Baseline/scripts.base.protocols.http.content-range-gap/extract_files.thefile new file mode 100644 index 0000000000..fc6cc2271f Binary files /dev/null and b/testing/btest/Baseline/scripts.base.protocols.http.content-range-gap/extract_files.thefile differ diff --git a/testing/btest/Baseline/scripts.base.protocols.http.entity-gap/entity_data b/testing/btest/Baseline/scripts.base.protocols.http.entity-gap/entity_data new file mode 100644 index 
0000000000..37d10fb294 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.protocols.http.entity-gap/entity_data @@ -0,0 +1,4 @@ +^J0.26 | 2012-08-24 15:10:04 -0700^J^J * Fixing update-changes, which could pick the wrong control file. (Robin Sommer)^J^J * Fixing GPG signing script. (Robin Sommer)^J^J0.25 | 2012-08-01 13:55:46 -0500^J^J * Fix configure script to exit with non-zero status on error (Jon Siwek)^J^J0.24 | 2012-07-05 12:50:43 -0700^J^J * Raise minimum required CMake version to 2.6.3 (Jon Siwek)^J^J * Adding script to delete old fully-merged branches. (Robin Sommer)^J^J0.23-2 | 2012-01-25 13:24:01 -0800^J^J * Fix a bro-cut error message. (Daniel Thayer)^J^J0.23 | 2012-01-11 12:16:11 -0800^J^J * Tweaks to release scripts, plus a new one for signing files.^J (Robin Sommer)^J^J0.22 | 2012-01-10 16:45:19 -0800^J^J * Tweaks for OpenBSD support. (Jon Siwek)^J^J * bro-cut extensions and fixes. (Robin Sommer)^J ^J - If no field names are given on the command line, we now pass through^J all fields. Adresses #657.^J^J - Removing some GNUism from awk script. Addresses #653.^J^J - Added option for time output in UTC. Addresses #668.^J^J - Added output field separator option -F. Addresses #649.^J^J - Fixing option -c: only some header lines were passed through^J +<1448 byte gap> +s/check-release to run before making releases.^J (Robin Sommer)^J^J * devel-tools/update-changes gets a new option -a to amend to^J previous commit if possible. Default is now not to (used to be the^J opposite). (Robin Sommer)^J^J * Change Mozilla trust root generation to index certs by subject DN. (Jon Siwek)^J^J * Change distclean to only remove build dir. (Jon Siwek)^J^J * Make dist now cleans the copied source (Jon Siwek)^J^J * Small tweak to make-release for forced git-clean. 
(Jon Siwek)^J^J * Fix to not let updates scripts loose their executable permissions.^J (Robin Sommer)^J^J * devel-tools/update-changes now looks for a 'release' tag to^J idenfify the stable version, and 'beta' for the beta versions.^J (Robin Sommer).^J^J * Distribution cleanup. (Robin Sommer)^J^J * New script devel-tools/make-release to create source tar balls.^J (Robin Sommer)^J^J * Removing bdcat. With the new log format, this isn't very useful^J anymore. (Robin Sommer)^J^J * Adding script that shows all pending git fastpath commits. (Robin^J Sommer)^J^J * Script to measure CPU time by loading an increasing set of^J scripts. (Robin Sommer)^J^J * extract-conn script now deals wit *.gz files. (Robin Sommer)^J^J * Tiny update to output a valid CA list file for SSL cert^J validation. (Seth Hall)^J^J * Adding "install-aux" target. Addresses #622. (Jon Siwek)^J^J * Distribution cleanup. (Jon Siwek and Robin Sommer)^J^J * FindPCAP now links against + thread library when necessary (e.g.^J PF_RING's libpcap) (Jon Siwek)^J^J * Install binaries with an RPATH (Jon Siwek)^J^J * Workaround for FreeBSD CMake port missing debug flags (Jon Siwek)^J^J * Rewrite of the update-changes script. (Robin Sommer)^J^J0.1-1 | 2011-06-14 21:12:41 -0700^J^J * Add a script for generating Mozilla's CA list for the SSL analyzer.^J (Seth Hall)^J^J0.1 | 2011-04-01 16:28:22 -0700^J^J * Converting build process to CMake. (Jon Siwek)^J^J * Removing cf/hf/ca-* from distribution. The README has a note where^J to find them now. (Robin Sommer)^J^J * General cleanup. (Robin Sommer)^J^J * Initial import of bro/aux from SVN r7088. 
(Jon Siwek)^J diff --git a/testing/btest/Baseline/scripts.base.protocols.http.entity-gap/extract_files.file0 b/testing/btest/Baseline/scripts.base.protocols.http.entity-gap/extract_files.file0 new file mode 100644 index 0000000000..f032ec61d3 Binary files /dev/null and b/testing/btest/Baseline/scripts.base.protocols.http.entity-gap/extract_files.file0 differ diff --git a/testing/btest/Baseline/scripts.base.protocols.http.entity-gap2/entity_data b/testing/btest/Baseline/scripts.base.protocols.http.entity-gap2/entity_data new file mode 100644 index 0000000000..c6e5999e07 --- /dev/null +++ b/testing/btest/Baseline/scripts.base.protocols.http.entity-gap2/entity_data @@ -0,0 +1,4 @@ +^J0.26 | 2012-08-24 15:10:04 -0700^J^J * Fixing update-changes, which could pick the wrong control file. (Robin Sommer)^J^J * Fixing GPG signing script. (Robin Sommer)^J^J0.25 | 2012-08-01 13:55:46 -0500^J^J * Fix configure script to exit with non-zero status on error (Jon Siwek)^J^J0.24 | 2012-07-05 12:50:43 -0700^J^J * Raise minimum required CMake version to 2.6.3 (Jon Siwek)^J^J * Adding script to delete old fully-merged branches. (Robin Sommer)^J^J0.23-2 | 2012-01-25 13:24:01 -0800^J^J * Fix a bro-cut error message. (Daniel Thayer)^J^J0.23 | 2012-01-11 12:16:11 -0800^J^J * Tweaks to release scripts, plus a new one for signing files.^J (Robin Sommer)^J^J0.22 | 2012-01-10 16:45:19 -0800^J^J * Tweaks for OpenBSD support. (Jon Siwek)^J^J * bro-cut extensions and fixes. (Robin Sommer)^J ^J - If no field names are given on the command line, we now pass through^J all fields. Adresses #657.^J^J - Removing some GNUism from awk script. Addresses #653.^J^J - Added option for time output in UTC. Addresses #668.^J^J - Added output field separator option -F. Addresses #649.^J^J - Fixing option -c: only some header lines were passed through^J + rather than all. (Robin Sommer)^J^J * Fix parallel make portability. (Jon Siwek)^J^J0.21-9 | 2011-11-07 05:44:14 -0800^J^J * Fixing compiler warnings. 
Addresses #388. (Jon Siwek)^J^J0.21-2 | 2011-11-02 18:12:13 -0700^J^J * Fix for misnaming temp file in update-changes script. (Robin Sommer)^J^J0.21-1 | 2011-11-02 18:10:39 -0700^J^J * Little fix for make-release script, which could pick out the wrong^J tag. (Robin Sommer)^J^J0.21 | 2011-10-27 17:40:45 -0700^J^J * Fixing bro-cut's usage message and argument error handling. (Robin Sommer)^J^J * Bugfix in update-changes script. (Robin Sommer)^J^J * update-changes now ignores commits it did itself. (Robin Sommer)^J^J * Fix a bug in the update-changes script. (Robin Sommer)^J^J * bro-cut now always installs to $prefix/bin by `make install`. (Jon Siwek)^J^J * Options to adjust time format for bro-cut. (Robin Sommer)^J^J The default with -d is now ISO format. The new option "-D "^J specifies a custom strftime()-style format string. Alternatively,^J the environment variable BRO_CUT_TIMEFMT can set the format as^J well.^J^J * bro-cut now understands the field separator header. (Robin Sommer)^J^J * Renaming options -h/-H -> -c/-C, and doing some general cleanup.^J^J0.2 | 2011-10-25 19:53:57 -0700^J^J * Adding support for replacing version string in a setup.py. (Robin^J Sommer)^J^J * Change generated root cert DN indices format for RFC2253^J compliance. (Jon Siwek)^J^J * New tool devel-tool +<1448 byte gap> + thread library when necessary (e.g.^J PF_RING's libpcap) (Jon Siwek)^J^J * Install binaries with an RPATH (Jon Siwek)^J^J * Workaround for FreeBSD CMake port missing debug flags (Jon Siwek)^J^J * Rewrite of the update-changes script. (Robin Sommer)^J^J0.1-1 | 2011-06-14 21:12:41 -0700^J^J * Add a script for generating Mozilla's CA list for the SSL analyzer.^J (Seth Hall)^J^J0.1 | 2011-04-01 16:28:22 -0700^J^J * Converting build process to CMake. (Jon Siwek)^J^J * Removing cf/hf/ca-* from distribution. The README has a note where^J to find them now. (Robin Sommer)^J^J * General cleanup. (Robin Sommer)^J^J * Initial import of bro/aux from SVN r7088. 
(Jon Siwek)^J diff --git a/testing/btest/Baseline/scripts.base.protocols.http.entity-gap2/extract_files.file0 b/testing/btest/Baseline/scripts.base.protocols.http.entity-gap2/extract_files.file0 new file mode 100644 index 0000000000..8eb236cb55 Binary files /dev/null and b/testing/btest/Baseline/scripts.base.protocols.http.entity-gap2/extract_files.file0 differ diff --git a/testing/btest/Baseline/scripts.base.protocols.modbus.exception_handling/modbus.log b/testing/btest/Baseline/scripts.base.protocols.modbus.exception_handling/modbus.log index 407487756d..d51e336b0a 100644 --- a/testing/btest/Baseline/scripts.base.protocols.modbus.exception_handling/modbus.log +++ b/testing/btest/Baseline/scripts.base.protocols.modbus.exception_handling/modbus.log @@ -3,9 +3,13 @@ #empty_field (empty) #unset_field - #path modbus -#open 2013-08-26-19-04-11 +#open 2014-09-11-15-00-05 #fields ts uid id.orig_h id.orig_p id.resp_h id.resp_p func exception #types time string addr port addr port string string 1153491909.414125 CXWv6p3arKYeMETxOg 192.168.66.235 2582 166.161.16.230 502 unknown-156 - +1153491911.997264 CXWv6p3arKYeMETxOg 192.168.66.235 2582 166.161.16.230 502 unknown-160 - 1153491913.013726 CXWv6p3arKYeMETxOg 192.168.66.235 2582 166.161.16.230 502 unknown-162 - -#close 2013-08-26-19-04-11 +1153491923.091742 CXWv6p3arKYeMETxOg 192.168.66.235 2582 166.161.16.230 502 unknown-175 - +1153491923.091742 CXWv6p3arKYeMETxOg 192.168.66.235 2582 166.161.16.230 502 unknown-179 - +1153491923.623460 CXWv6p3arKYeMETxOg 192.168.66.235 2582 166.161.16.230 502 unknown-165 - +#close 2014-09-11-15-00-05 diff --git a/testing/btest/Baseline/scripts.policy.misc.dump-events/all-events-no-args.log b/testing/btest/Baseline/scripts.policy.misc.dump-events/all-events-no-args.log index 6de44b1fbf..cad75c268d 100644 --- a/testing/btest/Baseline/scripts.policy.misc.dump-events/all-events-no-args.log +++ b/testing/btest/Baseline/scripts.policy.misc.dump-events/all-events-no-args.log @@ -64,10 +64,9 @@ 
1254722770.692743 mime_begin_entity 1254722770.692743 mime_one_header 1254722770.692743 mime_one_header -1254722770.692786 get_file_handle +1254722770.692743 get_file_handle 1254722770.692786 file_new 1254722770.692786 file_over_new_connection -1254722770.692804 get_file_handle 1254722770.692804 mime_end_entity 1254722770.692804 get_file_handle 1254722770.692804 file_state_remove @@ -79,20 +78,10 @@ 1254722770.692804 mime_one_header 1254722770.692804 mime_one_header 1254722770.692804 mime_one_header -1254722770.692823 get_file_handle +1254722770.692804 get_file_handle 1254722770.692823 file_new 1254722770.692823 file_over_new_connection -1254722770.692823 get_file_handle 1254722770.695115 new_connection -1254722771.469814 get_file_handle -1254722771.494181 get_file_handle -1254722771.494181 get_file_handle -1254722771.494199 get_file_handle -1254722771.834628 get_file_handle -1254722771.834655 get_file_handle -1254722771.834655 get_file_handle -1254722771.858316 get_file_handle -1254722771.858334 get_file_handle 1254722771.858334 mime_end_entity 1254722771.858334 get_file_handle 1254722771.858334 file_state_remove diff --git a/testing/btest/Baseline/scripts.policy.misc.dump-events/all-events.log b/testing/btest/Baseline/scripts.policy.misc.dump-events/all-events.log index b8f576e497..b466b1db49 100644 --- a/testing/btest/Baseline/scripts.policy.misc.dump-events/all-events.log +++ b/testing/btest/Baseline/scripts.policy.misc.dump-events/all-events.log @@ -305,15 +305,15 @@ [2] is_orig: bool = T 1254722770.692743 file_new - [0] f: fa_file = [id=Fel9gs4OtNEV6gUJZ5, parent_id=, source=SMTP, is_orig=F, conns={^J^I[[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp]] = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0], start_time=1254722767.529046, duration=3.163697, 
service={^J^I^ISMTP^J^I}, addl=, hot=0, history=ShAdDa, uid=CjhGID4nQcgTWjvg4c, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=[ts=1254722768.219663, uid=CjhGID4nQcgTWjvg4c, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto={^J^I^I^J^I}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={^J^I^I^J^I}, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=3], socks=, ssh=, syslog=]^J}, last_active=1254722770.692743, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=1024, bof_buffer=Hello^M^J^M^J ^M^J^M^JI send u smtp pcap file ^M^J^M^JFind the attachment^M^J^M^J ^M^J^M^JGPS^M^J^M^J^M^J, mime_type=text/plain, mime_types=[[strength=-20, mime=text/plain]], info=, u2_events=] + [0] f: fa_file = [id=Fel9gs4OtNEV6gUJZ5, parent_id=, source=SMTP, is_orig=F, conns={^J^I[[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp]] = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0], start_time=1254722767.529046, duration=3.163697, service={^J^I^ISMTP^J^I}, addl=, hot=0, history=ShAdDa, uid=CjhGID4nQcgTWjvg4c, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, 
snmp=, smtp=[ts=1254722768.219663, uid=CjhGID4nQcgTWjvg4c, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto={^J^I^I^J^I}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={^J^I^I^J^I}, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=3], socks=, ssh=, syslog=]^J}, last_active=1254722770.692743, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=1024, bof_buffer=Hello^M^J^M^J ^M^J^M^JI send u smtp pcap file ^M^J^M^JFind the attachment^M^J^M^J ^M^J^M^JGPS^M^J^M^J, mime_type=text/plain, mime_types=[[strength=-20, mime=text/plain]], info=, u2_events=] 1254722770.692743 file_over_new_connection - [0] f: fa_file = [id=Fel9gs4OtNEV6gUJZ5, parent_id=, source=SMTP, is_orig=F, conns={^J^I[[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp]] = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0], start_time=1254722767.529046, duration=3.163697, service={^J^I^ISMTP^J^I}, addl=, hot=0, history=ShAdDa, uid=CjhGID4nQcgTWjvg4c, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=[ts=1254722768.219663, uid=CjhGID4nQcgTWjvg4c, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto={^J^I^I^J^I}, date=Mon, 5 
Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={^J^I^I^J^I}, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=3], socks=, ssh=, syslog=]^J}, last_active=1254722770.692743, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=1024, bof_buffer=Hello^M^J^M^J ^M^J^M^JI send u smtp pcap file ^M^J^M^JFind the attachment^M^J^M^J ^M^J^M^JGPS^M^J^M^J^M^J, mime_type=text/plain, mime_types=[[strength=-20, mime=text/plain]], info=[ts=1254722770.692743, fuid=Fel9gs4OtNEV6gUJZ5, tx_hosts={^J^J}, rx_hosts={^J^J}, conn_uids={^J^J}, source=SMTP, depth=0, analyzers={^J^J}, mime_type=text/plain, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=], u2_events=] + [0] f: fa_file = [id=Fel9gs4OtNEV6gUJZ5, parent_id=, source=SMTP, is_orig=F, conns={^J^I[[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp]] = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0], start_time=1254722767.529046, duration=3.163697, service={^J^I^ISMTP^J^I}, addl=, hot=0, history=ShAdDa, uid=CjhGID4nQcgTWjvg4c, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=[ts=1254722768.219663, uid=CjhGID4nQcgTWjvg4c, id=[orig_h=10.10.1.4, orig_p=1470/tcp, 
resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto={^J^I^I^J^I}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={^J^I^I^J^I}, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=3], socks=, ssh=, syslog=]^J}, last_active=1254722770.692743, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=1024, bof_buffer=Hello^M^J^M^J ^M^J^M^JI send u smtp pcap file ^M^J^M^JFind the attachment^M^J^M^J ^M^J^M^JGPS^M^J^M^J, mime_type=text/plain, mime_types=[[strength=-20, mime=text/plain]], info=[ts=1254722770.692743, fuid=Fel9gs4OtNEV6gUJZ5, tx_hosts={^J^J}, rx_hosts={^J^J}, conn_uids={^J^J}, source=SMTP, depth=0, analyzers={^J^J}, mime_type=text/plain, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=], u2_events=] [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0], start_time=1254722767.529046, duration=3.163697, service={^J^ISMTP^J}, addl=, hot=0, history=ShAdDa, uid=CjhGID4nQcgTWjvg4c, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=[ts=1254722768.219663, uid=CjhGID4nQcgTWjvg4c, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, 
helo=GP, mailfrom=, rcptto={^J^I^J}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={^J^I^J}, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=3], socks=, ssh=, syslog=] [2] is_orig: bool = F 1254722770.692743 file_state_remove - [0] f: fa_file = [id=Fel9gs4OtNEV6gUJZ5, parent_id=, source=SMTP, is_orig=F, conns={^J^I[[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp]] = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0], start_time=1254722767.529046, duration=3.163697, service={^J^I^ISMTP^J^I}, addl=, hot=0, history=ShAdDa, uid=CjhGID4nQcgTWjvg4c, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=[ts=1254722768.219663, uid=CjhGID4nQcgTWjvg4c, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto={^J^I^I^J^I}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={^J^I^I^J^I}, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[Fel9gs4OtNEV6gUJZ5]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=3], socks=, ssh=, syslog=]^J}, last_active=1254722770.692743, seen_bytes=79, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=1024, bof_buffer=Hello^M^J^M^J ^M^J^M^JI send u smtp pcap file ^M^J^M^JFind the attachment^M^J^M^J ^M^J^M^JGPS^M^J^M^J^M^J, mime_type=text/plain, mime_types=[[strength=-20, mime=text/plain]], info=[ts=1254722770.692743, fuid=Fel9gs4OtNEV6gUJZ5, tx_hosts={^J^I74.53.140.153^J}, rx_hosts={^J^I10.10.1.4^J}, conn_uids={^J^ICjhGID4nQcgTWjvg4c^J}, source=SMTP, depth=3, analyzers={^J^J}, mime_type=text/plain, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=], u2_events=] + [0] f: fa_file = [id=Fel9gs4OtNEV6gUJZ5, parent_id=, source=SMTP, is_orig=F, conns={^J^I[[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp]] = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0], start_time=1254722767.529046, duration=3.163697, service={^J^I^ISMTP^J^I}, addl=, hot=0, history=ShAdDa, uid=CjhGID4nQcgTWjvg4c, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=[ts=1254722768.219663, uid=CjhGID4nQcgTWjvg4c, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto={^J^I^I^J^I}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={^J^I^I^J^I}, reply_to=, 
msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=, fuids=[Fel9gs4OtNEV6gUJZ5]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=3], socks=, ssh=, syslog=]^J}, last_active=1254722770.692743, seen_bytes=77, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=1024, bof_buffer=Hello^M^J^M^J ^M^J^M^JI send u smtp pcap file ^M^J^M^JFind the attachment^M^J^M^J ^M^J^M^JGPS^M^J^M^J, mime_type=text/plain, mime_types=[[strength=-20, mime=text/plain]], info=[ts=1254722770.692743, fuid=Fel9gs4OtNEV6gUJZ5, tx_hosts={^J^I74.53.140.153^J}, rx_hosts={^J^I10.10.1.4^J}, conn_uids={^J^ICjhGID4nQcgTWjvg4c^J}, source=SMTP, depth=3, analyzers={^J^J}, mime_type=text/plain, filename=, duration=0 secs, local_orig=, is_orig=F, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timedout=F, parent_fuid=, md5=, sha1=, sha256=, x509=, extracted=], u2_events=] 1254722770.692743 get_file_handle [0] tag: enum = Analyzer::ANALYZER_SMTP @@ -331,22 +331,17 @@ [0] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0], start_time=1254722767.529046, duration=3.163697, service={^J^ISMTP^J}, addl=, hot=0, history=ShAdDa, uid=CjhGID4nQcgTWjvg4c, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=[ts=1254722768.219663, uid=CjhGID4nQcgTWjvg4c, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, 
rcptto={^J^I^J}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={^J^I^J}, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[Fel9gs4OtNEV6gUJZ5]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=4], socks=, ssh=, syslog=] [1] h: mime_header_rec = [name=CONTENT-TRANSFER-ENCODING, value=quoted-printable] -1254722770.692786 get_file_handle +1254722770.692743 get_file_handle [0] tag: enum = Analyzer::ANALYZER_SMTP - [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=3070, state=4, num_pkts=10, num_bytes_ip=2018, flow_label=0], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0], start_time=1254722767.529046, duration=3.16374, service={^J^ISMTP^J}, addl=, hot=0, history=ShAdDa, uid=CjhGID4nQcgTWjvg4c, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=[ts=1254722768.219663, uid=CjhGID4nQcgTWjvg4c, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto={^J^I^J}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={^J^I^J}, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[Fel9gs4OtNEV6gUJZ5]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=4], socks=, ssh=, syslog=] + [1] c: connection = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=1610, state=4, num_pkts=9, num_bytes_ip=518, flow_label=0], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0], start_time=1254722767.529046, duration=3.163697, service={^J^ISMTP^J}, addl=, hot=0, history=ShAdDa, uid=CjhGID4nQcgTWjvg4c, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=[ts=1254722768.219663, uid=CjhGID4nQcgTWjvg4c, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto={^J^I^J}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={^J^I^J}, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." 
on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[Fel9gs4OtNEV6gUJZ5]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=4], socks=, ssh=, syslog=] [2] is_orig: bool = F 1254722770.692786 file_new - [0] f: fa_file = [id=Ft4M3f2yMvLlmwtbq9, parent_id=, source=SMTP, is_orig=F, conns={^J^I[[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp]] = [id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], orig=[size=3070, state=4, num_pkts=10, num_bytes_ip=2018, flow_label=0], resp=[size=462, state=4, num_pkts=10, num_bytes_ip=870, flow_label=0], start_time=1254722767.529046, duration=3.16374, service={^J^I^ISMTP^J^I}, addl=, hot=0, history=ShAdDa, uid=CjhGID4nQcgTWjvg4c, tunnel=, dpd=, conn=, extract_orig=F, extract_resp=F, dhcp=, dnp3=, dns=, dns_state=, ftp=, ftp_data_reuse=F, ssl=, http=, http_state=, irc=, modbus=, radius=, snmp=, smtp=[ts=1254722768.219663, uid=CjhGID4nQcgTWjvg4c, id=[orig_h=10.10.1.4, orig_p=1470/tcp, resp_h=74.53.140.153, resp_p=25/tcp], trans_depth=1, helo=GP, mailfrom=, rcptto={^J^I^I^J^I}, date=Mon, 5 Oct 2009 11:36:07 +0530, from="Gurpartap Singh" , to={^J^I^I^J^I}, reply_to=, msg_id=<000301ca4581$ef9e57f0$cedb07d0$@in>, in_reply_to=, subject=SMTP, x_originating_ip=, first_received=, second_received=, last_reply=354 Enter message, ending with "." on a line by itself, path=[74.53.140.153, 10.10.1.4], user_agent=Microsoft Office Outlook 12.0, tls=F, process_received_from=T, has_client_activity=T, entity=[filename=], fuids=[Fel9gs4OtNEV6gUJZ5]], smtp_state=[helo=GP, messages_transferred=0, pending_messages=, mime_depth=4], socks=, ssh=, syslog=]^J}, last_active=1254722770.692786, seen_bytes=0, total_bytes=, missing_bytes=0, overflow_bytes=0, timeout_interval=2.0 mins, bof_buffer_size=1024, bof_buffer=^M^J^M^J^M^J^M^J^M^J